Merge "camera: Add basic fuzz test for libcameraservice camera2 implementation." into sc-dev
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index 8af704d..459ad15 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -174,6 +174,13 @@
oneway void notifySystemEvent(int eventId, in int[] args);
/**
+ * Notify the camera service of a display configuration change.
+ *
+ * Callers require the android.permission.CAMERA_SEND_SYSTEM_EVENTS permission.
+ */
+ oneway void notifyDisplayConfigurationChange();
+
+ /**
* Notify the camera service of a device physical status change. May only be called from
* a privileged process.
*
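
Note: a privileged caller holding android.permission.CAMERA_SEND_SYSTEM_EVENTS would reach the new one-way method through the generated C++ binder proxy, roughly as in the sketch below (the "media.camera" service name and include paths are assumptions based on existing ICameraService clients, not part of this change):

    // Hypothetical sketch: forward a display configuration change to cameraserver.
    #include <binder/IInterface.h>
    #include <binder/IServiceManager.h>
    #include <android/hardware/ICameraService.h>

    using namespace android;

    void notifyCameraServiceOfDisplayChange() {
        sp<IBinder> binder = defaultServiceManager()->getService(String16("media.camera"));
        if (binder == nullptr) return;  // cameraserver not running
        sp<hardware::ICameraService> cs = interface_cast<hardware::ICameraService>(binder);
        // One-way call: returns immediately, no reply or exception is delivered.
        cs->notifyDisplayConfigurationChange();
    }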
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index 7387442..dab2fef 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -24,6 +24,28 @@
using namespace android;
+// Formats not listed in the public API, but still available to AImageReader
+// Enum value must match corresponding enum in ui/PublicFormat.h (which is not
+// available to VNDK)
+enum AIMAGE_PRIVATE_FORMATS {
+ /**
+  * Unprocessed implementation-dependent raw depth measurements,
+  * opaque with 16 bit samples.
+  */
+ AIMAGE_FORMAT_RAW_DEPTH = 0x1002,
+
+ /**
+ * Device-specific 10-bit depth RAW image format.
+ *
+ * <p>Unprocessed implementation-dependent raw depth measurements, opaque with 10-bit samples
+ * and a device-specific bit layout.</p>
+ */
+ AIMAGE_FORMAT_RAW_DEPTH10 = 0x1003,
+};
+
/**
* ACameraMetadata Implementation
*/
@@ -290,6 +312,10 @@
format = AIMAGE_FORMAT_DEPTH_POINT_CLOUD;
} else if (format == HAL_PIXEL_FORMAT_Y16) {
format = AIMAGE_FORMAT_DEPTH16;
+ } else if (format == HAL_PIXEL_FORMAT_RAW16) {
+ format = static_cast<int32_t>(AIMAGE_FORMAT_RAW_DEPTH);
+ } else if (format == HAL_PIXEL_FORMAT_RAW10) {
+ format = static_cast<int32_t>(AIMAGE_FORMAT_RAW_DEPTH10);
}
filteredDepthStreamConfigs.push_back(format);
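
Note: the private format values above are what an AImageReader sees for depth-capable RAW streams; a minimal sketch of creating such a reader with the NDK media API (the helper function is illustrative, and 0x1002 mirrors the AIMAGE_FORMAT_RAW_DEPTH value defined above):

    #include <media/NdkImageReader.h>

    // Create an AImageReader for the private RAW depth format (AIMAGE_FORMAT_RAW_DEPTH).
    AImageReader* createRawDepthReader(int32_t width, int32_t height) {
        constexpr int32_t kRawDepthFormat = 0x1002;
        AImageReader* reader = nullptr;
        media_status_t status =
                AImageReader_new(width, height, kRawDepthFormat, /*maxImages=*/2, &reader);
        return (status == AMEDIA_OK) ? reader : nullptr;
    }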
diff --git a/camera/ndk/include/camera/NdkCameraCaptureSession.h b/camera/ndk/include/camera/NdkCameraCaptureSession.h
index 6c1cf33..2b7f040 100644
--- a/camera/ndk/include/camera/NdkCameraCaptureSession.h
+++ b/camera/ndk/include/camera/NdkCameraCaptureSession.h
@@ -61,6 +61,10 @@
*/
typedef void (*ACameraCaptureSession_stateCallback)(void* context, ACameraCaptureSession *session);
+/**
+ * Capture session state callbacks used in {@link ACameraDevice_createCaptureSession} and
+ * {@link ACameraDevice_createCaptureSessionWithSessionParameters}
+ */
typedef struct ACameraCaptureSession_stateCallbacks {
/// optional application context.
void* context;
@@ -246,6 +250,10 @@
void* context, ACameraCaptureSession* session,
ACaptureRequest* request, ACameraWindowType* window, int64_t frameNumber);
+/**
* ACameraCaptureSession_captureCallbacks structure used in
+ * {@link ACameraCaptureSession_capture} and {@link ACameraCaptureSession_setRepeatingRequest}.
+ */
typedef struct ACameraCaptureSession_captureCallbacks {
/// optional application context.
void* context;
@@ -413,7 +421,10 @@
*/
void ACameraCaptureSession_close(ACameraCaptureSession* session);
-struct ACameraDevice;
+/**
+ * ACameraDevice is an opaque type that provides access to a camera device.
+ * A pointer can be obtained using the {@link ACameraManager_openCamera} method.
+ */
typedef struct ACameraDevice ACameraDevice;
/**
@@ -591,6 +602,10 @@
camera_status_t ACameraCaptureSession_abortCaptures(ACameraCaptureSession* session)
__INTRODUCED_IN(24);
+/**
+ * Opaque object for a capture session output; use {@link ACaptureSessionOutput_create} or
+ * {@link ACaptureSessionSharedOutput_create} to create an instance.
+ */
typedef struct ACaptureSessionOutput ACaptureSessionOutput;
/**
@@ -604,9 +619,9 @@
*
* <p>Native windows that get removed must not be part of any active repeating or single/burst
* request or have any pending results. Consider updating repeating requests via
- * {@link ACaptureSessionOutput_setRepeatingRequest} and then wait for the last frame number
+ * {@link ACameraCaptureSession_setRepeatingRequest} and then wait for the last frame number
* when the sequence completes
- * {@link ACameraCaptureSession_captureCallback#onCaptureSequenceCompleted}.</p>
+ * {@link ACameraCaptureSession_captureCallbacks#onCaptureSequenceCompleted}.</p>
*
* <p>Native windows that get added must not be part of any other registered ACaptureSessionOutput
* and must be compatible. Compatible windows must have matching format, rotation and
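
Note: the update flow described above, written out as a minimal sketch (it assumes a shared ACaptureSessionOutput from ACaptureSessionSharedOutput_create and a repeating request that has already been switched away from the old window; this callback would be installed as onCaptureSequenceCompleted):

    #include <camera/NdkCameraCaptureSession.h>
    #include <camera/NdkCameraDevice.h>

    struct SharedOutputUpdate {
        ACaptureSessionOutput* sharedOutput;
        ACameraWindowType* oldWindow;  // no longer targeted by any request
    };

    // Matches ACameraCaptureSession_captureCallback_sequenceEnd.
    static void onSequenceCompleted(void* context, ACameraCaptureSession* session,
                                    int /*sequenceId*/, int64_t /*frameNumber*/) {
        auto* update = static_cast<SharedOutputUpdate*>(context);
        // The old window has no pending results left, so it can be removed now.
        ACaptureSessionSharedOutput_remove(update->sharedOutput, update->oldWindow);
        ACameraCaptureSession_updateSharedOutput(session, update->sharedOutput);
    }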
@@ -713,7 +728,15 @@
* Same as ACameraCaptureSession_captureCallbacks
*/
void* context;
+
+ /**
+ * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureStarted}.
+ */
ACameraCaptureSession_captureCallback_start onCaptureStarted;
+
+ /**
+ * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureProgressed}.
+ */
ACameraCaptureSession_captureCallback_result onCaptureProgressed;
/**
@@ -751,10 +774,18 @@
ACameraCaptureSession_logicalCamera_captureCallback_failed onLogicalCameraCaptureFailed;
/**
- * Same as ACameraCaptureSession_captureCallbacks
+ * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureSequenceCompleted}.
*/
ACameraCaptureSession_captureCallback_sequenceEnd onCaptureSequenceCompleted;
+
+ /**
+ * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureSequenceAborted}.
+ */
ACameraCaptureSession_captureCallback_sequenceAbort onCaptureSequenceAborted;
+
+ /**
+ * Same as {@link ACameraCaptureSession_captureCallbacks#onCaptureBufferLost}.
+ */
ACameraCaptureSession_captureCallback_bufferLost onCaptureBufferLost;
} ACameraCaptureSession_logicalCamera_captureCallbacks;
diff --git a/camera/ndk/include/camera/NdkCameraDevice.h b/camera/ndk/include/camera/NdkCameraDevice.h
index f72fe8d..7be4bd3 100644
--- a/camera/ndk/include/camera/NdkCameraDevice.h
+++ b/camera/ndk/include/camera/NdkCameraDevice.h
@@ -124,6 +124,10 @@
*/
typedef void (*ACameraDevice_ErrorStateCallback)(void* context, ACameraDevice* device, int error);
+/**
+ * Applications' callbacks for camera device state changes, registered with
+ * {@link ACameraManager_openCamera}.
+ */
typedef struct ACameraDevice_StateCallbacks {
/// optional application context.
void* context;
@@ -198,6 +202,10 @@
*/
const char* ACameraDevice_getId(const ACameraDevice* device) __INTRODUCED_IN(24);
+/**
+ * Capture request pre-defined template types, used in {@link ACameraDevice_createCaptureRequest}
+ * and {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ */
typedef enum {
/**
* Create a request suitable for a camera preview window. Specifically, this
@@ -301,10 +309,12 @@
const ACameraDevice* device, ACameraDevice_request_template templateId,
/*out*/ACaptureRequest** request) __INTRODUCED_IN(24);
-
+/**
+ * Opaque object for the capture session output container; use
+ * {@link ACaptureSessionOutputContainer_create} to create an instance.
+ */
typedef struct ACaptureSessionOutputContainer ACaptureSessionOutputContainer;
-typedef struct ACaptureSessionOutput ACaptureSessionOutput;
/**
* Create a capture session output container.
@@ -844,7 +854,7 @@
/*out*/ACaptureRequest** request) __INTRODUCED_IN(29);
/**
- * Check whether a particular {@ACaptureSessionOutputContainer} is supported by
+ * Check whether a particular {@link ACaptureSessionOutputContainer} is supported by
* the camera device.
*
* <p>This method performs a runtime check of a given {@link
@@ -875,6 +885,7 @@
* device.</li>
* <li>{@link ACAMERA_ERROR_UNSUPPORTED_OPERATION} if the query operation is not
* supported by the camera device.</li>
+ * </ul>
*/
camera_status_t ACameraDevice_isSessionConfigurationSupported(
const ACameraDevice* device,
diff --git a/camera/ndk/include/camera/NdkCameraError.h b/camera/ndk/include/camera/NdkCameraError.h
index 9d77eb4..26db7f2 100644
--- a/camera/ndk/include/camera/NdkCameraError.h
+++ b/camera/ndk/include/camera/NdkCameraError.h
@@ -40,7 +40,13 @@
__BEGIN_DECLS
+/**
+ * Camera status enum types.
+ */
typedef enum {
+ /**
+ * Camera operation has succeeded.
+ */
ACAMERA_OK = 0,
ACAMERA_ERROR_BASE = -10000,
diff --git a/camera/ndk/include/camera/NdkCameraManager.h b/camera/ndk/include/camera/NdkCameraManager.h
index be32b11..729182e 100644
--- a/camera/ndk/include/camera/NdkCameraManager.h
+++ b/camera/ndk/include/camera/NdkCameraManager.h
@@ -326,7 +326,7 @@
* @see ACameraManager_registerExtendedAvailabilityCallback
*/
typedef struct ACameraManager_ExtendedAvailabilityListener {
- ///
+ /// Called when a camera becomes available or unavailable
ACameraManager_AvailabilityCallbacks availabilityCallbacks;
/// Called when there is camera access permission change
diff --git a/camera/ndk/include/camera/NdkCameraMetadata.h b/camera/ndk/include/camera/NdkCameraMetadata.h
index 0d5e6c4..b331d50 100644
--- a/camera/ndk/include/camera/NdkCameraMetadata.h
+++ b/camera/ndk/include/camera/NdkCameraMetadata.h
@@ -256,10 +256,12 @@
/**
* Return a {@link ACameraMetadata} that references the same data as
- * {@link cameraMetadata}, which is an instance of
- * {@link android.hardware.camera2.CameraMetadata} (e.g., a
- * {@link android.hardware.camera2.CameraCharacteristics} or
- * {@link android.hardware.camera2.CaptureResult}).
+ * <a href="/reference/android/hardware/camera2/CameraMetadata">
+ * android.hardware.camera2.CameraMetadata</a> from Java API. (e.g., a
+ * <a href="/reference/android/hardware/camera2/CameraCharacteristics">
+ * android.hardware.camera2.CameraCharacteristics</a>
+ * or <a href="/reference/android/hardware/camera2/CaptureResult">
+ * android.hardware.camera2.CaptureResult</a>).
*
* <p>The returned ACameraMetadata must be freed by the application by {@link ACameraMetadata_free}
* after application is done using it.</p>
@@ -269,11 +271,13 @@
* the Java metadata is garbage collected.
*
* @param env the JNI environment.
- * @param cameraMetadata the source {@link android.hardware.camera2.CameraMetadata} from which the
+ * @param cameraMetadata the source <a href="/reference/android/hardware/camera2/CameraMetadata">
+ android.hardware.camera2.CameraMetadata</a> from which the
* returned {@link ACameraMetadata} is a view.
*
- * @return a valid ACameraMetadata pointer or NULL if {@link cameraMetadata} is null or not a valid
- * instance of {@link android.hardware.camera2.CameraMetadata}.
+ * @return a valid ACameraMetadata pointer or NULL if cameraMetadata is null or not a valid
+ * instance of <a href="/reference/android/hardware/camera2/CameraMetadata">
+ * android.hardware.camera2.CameraMetadata</a>.
*
*/
ACameraMetadata* ACameraMetadata_fromCameraMetadata(JNIEnv* env, jobject cameraMetadata)
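
Note: a sketch of a JNI bridge built on the conversion described above; the Java-side class and method names are hypothetical:

    #include <jni.h>
    #include <camera/NdkCameraMetadata.h>
    #include <camera/NdkCameraMetadataTags.h>

    // Wrap a Java CameraCharacteristics object, read one entry, then free the view.
    extern "C" JNIEXPORT jint JNICALL
    Java_com_example_CameraJni_getSensorOrientation(JNIEnv* env, jclass,
                                                    jobject characteristics) {
        ACameraMetadata* metadata = ACameraMetadata_fromCameraMetadata(env, characteristics);
        if (metadata == nullptr) return -1;  // not a valid CameraMetadata instance

        jint orientation = -1;
        ACameraMetadata_const_entry entry = {};
        if (ACameraMetadata_getConstEntry(metadata, ACAMERA_SENSOR_ORIENTATION, &entry) ==
                ACAMERA_OK && entry.count > 0) {
            orientation = entry.data.i32[0];
        }
        ACameraMetadata_free(metadata);  // required: the caller owns the returned view
        return orientation;
    }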
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 70ce864..20ffd48 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -1868,7 +1868,7 @@
* <li>If the camera device has BURST_CAPTURE capability, the frame rate requirement of
* BURST_CAPTURE must still be met.</li>
* <li>All streams not larger than the maximum streaming dimension for BOKEH_STILL_CAPTURE mode
- * (queried via {@link ACAMERA_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_CAPABILITIES })
+ * (queried via {@link ACAMERA_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_MAX_SIZES })
* will have preview bokeh effect applied.</li>
* </ul>
* <p>When set to BOKEH_CONTINUOUS mode, configured streams dimension should not exceed this mode's
@@ -3502,7 +3502,7 @@
* preCorrectionActiveArraySize covers the camera device's field of view "after" zoom. See
* ACAMERA_CONTROL_ZOOM_RATIO for details.</p>
* <p>For camera devices with the
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* capability, ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE_MAXIMUM_RESOLUTION /
* ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE_MAXIMUM_RESOLUTION must be used as the
* coordinate system for requests where ACAMERA_SENSOR_PIXEL_MODE is set to
@@ -3964,7 +3964,7 @@
* configurations which belong to this physical camera, and it will advertise and will only
* advertise the maximum supported resolutions for a particular format.</p>
* <p>If this camera device isn't a physical camera device constituting a logical camera,
- * but a standalone <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * but a standalone <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* camera, this field represents the multi-resolution input/output stream configurations of
* default mode and max resolution modes. The sizes will be the maximum resolution of a
* particular format for default mode and max resolution mode.</p>
@@ -4867,12 +4867,12 @@
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#SENSOR_PIXEL_MODE_DEFAULT">CameraMetadata#SENSOR_PIXEL_MODE_DEFAULT</a> mode.
* When operating in
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#SENSOR_PIXEL_MODE_DEFAULT">CameraMetadata#SENSOR_PIXEL_MODE_DEFAULT</a> mode, sensors
- * with <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * with <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* capability would typically perform pixel binning in order to improve low light
* performance, noise reduction etc. However, in
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION">CameraMetadata#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION</a>
* mode (supported only
- * by <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * by <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* sensors), sensors typically operate in unbinned mode allowing for a larger image size.
* The stream configurations supported in
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION">CameraMetadata#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION</a>
@@ -4905,7 +4905,7 @@
* </ul></p>
*
* <p>This key will only be present in devices advertisting the
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* capability which also advertise <code>REMOSAIC_REPROCESSING</code> capability. On all other devices
* RAW targets will have a regular bayer pattern.</p>
*/
@@ -5231,7 +5231,7 @@
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION">CameraMetadata#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION</a>
* counterparts.
* This key will only be present for devices which advertise the
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* capability.</p>
* <p>The data representation is <code>int[4]</code>, which maps to <code>(left, top, width, height)</code>.</p>
*
@@ -5263,7 +5263,7 @@
* is, when ACAMERA_SENSOR_PIXEL_MODE is set to
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION">CameraMetadata#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION</a>.
* This key will only be present for devices which advertise the
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* capability.</p>
*
* @see ACAMERA_SENSOR_INFO_PHYSICAL_SIZE
@@ -5291,7 +5291,7 @@
* when ACAMERA_SENSOR_PIXEL_MODE is set to
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION">CameraMetadata#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION</a>.
* This key will only be present for devices which advertise the
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* capability.</p>
* <p>The data representation is <code>int[4]</code>, which maps to <code>(left, top, width, height)</code>.</p>
*
@@ -5321,7 +5321,7 @@
* <p>This key will not be present if REMOSAIC_REPROCESSING is not supported, since RAW images
* will have a regular bayer pattern.</p>
* <p>This key will not be present for sensors which don't have the
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>
* capability.</p>
*/
ACAMERA_SENSOR_INFO_BINNING_FACTOR = // int32[2]
@@ -9264,13 +9264,13 @@
/**
* <p>This is the default sensor pixel mode. This is the only sensor pixel mode
* supported unless a camera device advertises
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>.</p>
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>.</p>
*/
ACAMERA_SENSOR_PIXEL_MODE_DEFAULT = 0,
/**
* <p>This sensor pixel mode is offered by devices with capability
- * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILTIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>.
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR">CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_ULTRA_HIGH_RESOLUTION_SENSOR</a>.
* In this mode, sensors typically do not bin pixels, as a result can offer larger
* image sizes.</p>
*/
diff --git a/camera/ndk/include/camera/NdkCameraWindowType.h b/camera/ndk/include/camera/NdkCameraWindowType.h
index df977da..0838fba 100644
--- a/camera/ndk/include/camera/NdkCameraWindowType.h
+++ b/camera/ndk/include/camera/NdkCameraWindowType.h
@@ -50,4 +50,6 @@
typedef ANativeWindow ACameraWindowType;
#endif
+/** @} */
+
#endif //_NDK_CAMERA_WINDOW_TYPE_H
diff --git a/camera/ndk/include/camera/NdkCaptureRequest.h b/camera/ndk/include/camera/NdkCaptureRequest.h
index a4dc374..d83c5b3 100644
--- a/camera/ndk/include/camera/NdkCaptureRequest.h
+++ b/camera/ndk/include/camera/NdkCaptureRequest.h
@@ -44,10 +44,10 @@
__BEGIN_DECLS
-// Container for output targets
+/** Container for output targets */
typedef struct ACameraOutputTargets ACameraOutputTargets;
-// Container for a single output target
+/** Container for a single output target */
typedef struct ACameraOutputTarget ACameraOutputTarget;
/**
@@ -383,10 +383,10 @@
* Set/change a camera capture control entry with unsigned 8 bits data type for
* a physical camera backing a logical multi-camera device.
*
- * <p>Same as ACaptureRequest_setEntry_u8, except that if {@link tag} is contained
+ * <p>Same as ACaptureRequest_setEntry_u8, except that if tag is contained
* in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
* sets the entry for a particular physical sub-camera backing the logical multi-camera.
- * If {@link tag} is not contained in
+ * If tag is not contained in
* {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
* by the camera device.</p>
*
@@ -413,10 +413,10 @@
* Set/change a camera capture control entry with signed 32 bits data type for
* a physical camera of a logical multi-camera device.
*
- * <p>Same as ACaptureRequest_setEntry_i32, except that if {@link tag} is contained
+ * <p>Same as ACaptureRequest_setEntry_i32, except that if tag is contained
* in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
* sets the entry for a particular physical sub-camera backing the logical multi-camera.
- * If {@link tag} is not contained in
+ * If tag is not contained in
* {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
* by the camera device.</p>
*
@@ -443,10 +443,10 @@
* Set/change a camera capture control entry with float data type for
* a physical camera of a logical multi-camera device.
*
- * <p>Same as ACaptureRequest_setEntry_float, except that if {@link tag} is contained
+ * <p>Same as ACaptureRequest_setEntry_float, except that if tag is contained
* in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
* sets the entry for a particular physical sub-camera backing the logical multi-camera.
- * If {@link tag} is not contained in
+ * If tag is not contained in
* {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
* by the camera device.</p>
*
@@ -473,10 +473,10 @@
* Set/change a camera capture control entry with signed 64 bits data type for
* a physical camera of a logical multi-camera device.
*
- * <p>Same as ACaptureRequest_setEntry_i64, except that if {@link tag} is contained
+ * <p>Same as ACaptureRequest_setEntry_i64, except that if tag is contained
* in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
* sets the entry for a particular physical sub-camera backing the logical multi-camera.
- * If {@link tag} is not contained in
+ * If tag is not contained in
* {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
* by the camera device.</p>
*
@@ -503,10 +503,10 @@
* Set/change a camera capture control entry with double data type for
* a physical camera of a logical multi-camera device.
*
- * <p>Same as ACaptureRequest_setEntry_double, except that if {@link tag} is contained
+ * <p>Same as ACaptureRequest_setEntry_double, except that if tag is contained
* in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
* sets the entry for a particular physical sub-camera backing the logical multi-camera.
- * If {@link tag} is not contained in
+ * If tag is not contained in
* {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
* by the camera device.</p>
*
@@ -533,10 +533,10 @@
* Set/change a camera capture control entry with rational data type for
* a physical camera of a logical multi-camera device.
*
- * <p>Same as ACaptureRequest_setEntry_rational, except that if {@link tag} is contained
+ * <p>Same as ACaptureRequest_setEntry_rational, except that if tag is contained
* in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
* sets the entry for a particular physical sub-camera backing the logical multi-camera.
- * If {@link tag} is not contained in
+ * If tag is not contained in
* {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
* by the camera device.</p>
*
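
Note: a minimal sketch of the per-physical-camera override these functions provide (the helper and the choice of ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION are illustrative):

    #include <camera/NdkCaptureRequest.h>
    #include <camera/NdkCameraMetadataTags.h>

    // Override AE compensation on one physical sub-camera of a logical multi-camera.
    // The key is honored only if it appears in
    // ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS; otherwise it is ignored.
    camera_status_t setPhysicalAeCompensation(ACaptureRequest* request,
                                              const char* physicalCameraId, int32_t value) {
        return ACaptureRequest_setEntry_physicalCamera_i32(
                request, physicalCameraId, ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION,
                /*count=*/1, &value);
    }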
diff --git a/drm/libmediadrm/interface/mediadrm/DrmUtils.h b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
index 988cda9..ec0b878 100644
--- a/drm/libmediadrm/interface/mediadrm/DrmUtils.h
+++ b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
@@ -211,7 +211,7 @@
}
auto allLogs(gLogBuf.getLogs());
- LOG2BI("framework logs size %zu; plugin logs size %zu",
+ LOG2BD("framework logs size %zu; plugin logs size %zu",
allLogs.size(), pluginLogs.size());
std::copy(pluginLogs.begin(), pluginLogs.end(), std::back_inserter(allLogs));
std::sort(allLogs.begin(), allLogs.end(),
diff --git a/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.h b/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.h
index a537e63..7c6d86c 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.h
+++ b/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.h
@@ -22,7 +22,6 @@
#include <openssl/aes.h>
#include <utils/KeyedVector.h>
#include <utils/Mutex.h>
-#include <utils/RefBase.h>
namespace android {
struct ABuffer;
@@ -30,7 +29,7 @@
namespace clearkeycas {
class KeyFetcher;
-class ClearKeyCasSession : public RefBase {
+class ClearKeyCasSession {
public:
explicit ClearKeyCasSession(CasPlugin *plugin);
diff --git a/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
index 6ac3510..089eb1c 100644
--- a/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
@@ -207,6 +207,7 @@
}
infoMap.clear();
+ android::Mutex::Autolock lock(mPlayPolicyLock);
for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
infoMap.add(mPlayPolicy.keyAt(i), mPlayPolicy.valueAt(i));
}
diff --git a/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
index aa9b59d..95f15ca 100644
--- a/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
@@ -262,7 +262,7 @@
void initProperties();
void setPlayPolicy();
- android::Mutex mPlayPolicyLock;
+ mutable android::Mutex mPlayPolicyLock;
android::KeyedVector<String8, String8> mPlayPolicy;
android::KeyedVector<String8, String8> mStringProperties;
android::KeyedVector<String8, Vector<uint8_t>> mByteArrayProperties;
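
Note: marking the lock mutable lets const accessors (such as the query path locked in DrmPlugin.cpp above) take it; a generic illustration of the pattern, not the plugin's actual code:

    #include <utils/KeyedVector.h>
    #include <utils/Mutex.h>
    #include <utils/String8.h>

    class PolicyHolder {
      public:
        // A const method can still lock, because the mutable mutex is not part of
        // the object's logical state.
        void snapshot(android::KeyedVector<android::String8, android::String8>& out) const {
            android::Mutex::Autolock lock(mLock);
            for (size_t i = 0; i < mPolicy.size(); ++i) {
                out.add(mPolicy.keyAt(i), mPolicy.valueAt(i));
            }
        }

      private:
        mutable android::Mutex mLock;
        android::KeyedVector<android::String8, android::String8> mPolicy;
    };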
diff --git a/drm/mediadrm/plugins/clearkey/hidl/Android.bp b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
index e6e1f80..c49d5fe 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
@@ -50,7 +50,7 @@
relative_install_path: "hw",
- cflags: ["-Wall", "-Werror"],
+ cflags: ["-Wall", "-Werror", "-Wthread-safety"],
shared_libs: [
"android.hardware.drm@1.0",
diff --git a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
index d278633..302dd39 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
@@ -37,6 +37,8 @@
sp<IMemory> hidlMemory = mapMemory(base);
ALOGE_IF(hidlMemory == nullptr, "mapMemory returns nullptr");
+ std::lock_guard<std::mutex> shared_buffer_lock(mSharedBufferLock);
+
// allow mapMemory to return nullptr
mSharedBufferMap[bufferId] = hidlMemory;
return Void();
@@ -94,6 +96,7 @@
return Void();
}
+ std::unique_lock<std::mutex> shared_buffer_lock(mSharedBufferLock);
if (mSharedBufferMap.find(source.bufferId) == mSharedBufferMap.end()) {
_hidl_cb(Status_V1_2::ERROR_DRM_CANNOT_HANDLE, 0,
"source decrypt buffer base not set");
@@ -142,12 +145,17 @@
base = static_cast<uint8_t *>(static_cast<void *>(destBase->getPointer()));
- if (destBuffer.offset + destBuffer.size > destBase->getSize()) {
+ totalSize = 0;
+ if (__builtin_add_overflow(destBuffer.offset, destBuffer.size, &totalSize) ||
+ totalSize > destBase->getSize()) {
+ android_errorWriteLog(0x534e4554, "176444622");
_hidl_cb(Status_V1_2::ERROR_DRM_FRAME_TOO_LARGE, 0, "invalid buffer size");
return Void();
}
- destPtr = static_cast<void *>(base + destination.nonsecureMemory.offset);
+ destPtr = static_cast<void*>(base + destination.nonsecureMemory.offset);
+ // release mSharedBufferLock
+ shared_buffer_lock.unlock();
// Calculate the output buffer size and determine if any subsamples are
// encrypted.
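
Note: the bounds check added above avoids integer wrap-around before the comparison; the same pattern in isolation:

    #include <cstddef>

    // Reject ranges whose end would overflow or exceed the backing buffer.
    bool rangeFitsInBuffer(size_t offset, size_t size, size_t bufferSize) {
        size_t end = 0;
        if (__builtin_add_overflow(offset, size, &end)) {
            return false;  // offset + size wrapped around
        }
        return end <= bufferSize;
    }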
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
index a77759e..6f69110 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
@@ -220,6 +220,7 @@
if (requestString.find(kOfflineLicense) != std::string::npos) {
std::string emptyResponse;
std::string keySetIdString(keySetId.begin(), keySetId.end());
+ Mutex::Autolock lock(mFileHandleLock);
if (!mFileHandle.StoreLicense(keySetIdString,
DeviceFiles::kLicenseStateReleasing,
emptyResponse)) {
@@ -335,6 +336,7 @@
}
*keySetId = kKeySetIdPrefix + ByteArrayToHexString(
reinterpret_cast<const uint8_t*>(randomData.data()), randomData.size());
+ Mutex::Autolock lock(mFileHandleLock);
if (mFileHandle.LicenseExists(*keySetId)) {
// collision, regenerate
ALOGV("Retry generating KeySetId");
@@ -392,6 +394,7 @@
if (status == Status::OK) {
if (isOfflineLicense) {
if (isRelease) {
+ Mutex::Autolock lock(mFileHandleLock);
mFileHandle.DeleteLicense(keySetId);
mSessionLibrary->destroySession(session);
} else {
@@ -400,6 +403,7 @@
return Void();
}
+ Mutex::Autolock lock(mFileHandleLock);
bool ok = mFileHandle.StoreLicense(
keySetId,
DeviceFiles::kLicenseStateActive,
@@ -454,6 +458,7 @@
DeviceFiles::LicenseState licenseState;
std::string offlineLicense;
Status status = Status::OK;
+ Mutex::Autolock lock(mFileHandleLock);
if (!mFileHandle.RetrieveLicense(std::string(keySetId.begin(), keySetId.end()),
&licenseState, &offlineLicense)) {
ALOGE("Failed to restore offline license");
@@ -576,7 +581,6 @@
Return<void> DrmPlugin::queryKeyStatus(
const hidl_vec<uint8_t>& sessionId,
queryKeyStatus_cb _hidl_cb) {
-
if (sessionId.size() == 0) {
// Returns empty key status KeyValue pair
_hidl_cb(Status::BAD_VALUE, hidl_vec<KeyValue>());
@@ -586,12 +590,14 @@
std::vector<KeyValue> infoMapVec;
infoMapVec.clear();
+ mPlayPolicyLock.lock();
KeyValue keyValuePair;
for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
keyValuePair.key = mPlayPolicy[i].key;
keyValuePair.value = mPlayPolicy[i].value;
infoMapVec.push_back(keyValuePair);
}
+ mPlayPolicyLock.unlock();
_hidl_cb(Status::OK, toHidlVec(infoMapVec));
return Void();
}
@@ -704,6 +710,8 @@
}
Return<void> DrmPlugin::getOfflineLicenseKeySetIds(getOfflineLicenseKeySetIds_cb _hidl_cb) {
+ Mutex::Autolock lock(mFileHandleLock);
+
std::vector<std::string> licenseNames = mFileHandle.ListLicenses();
std::vector<KeySetId> keySetIds;
if (mMockError != Status_V1_2::OK) {
@@ -724,6 +732,7 @@
return toStatus_1_0(mMockError);
}
std::string licenseName(keySetId.begin(), keySetId.end());
+ Mutex::Autolock lock(mFileHandleLock);
if (mFileHandle.DeleteLicense(licenseName)) {
return Status::OK;
}
@@ -732,6 +741,8 @@
Return<void> DrmPlugin::getOfflineLicenseState(const KeySetId& keySetId,
getOfflineLicenseState_cb _hidl_cb) {
+ Mutex::Autolock lock(mFileHandleLock);
+
std::string licenseName(keySetId.begin(), keySetId.end());
DeviceFiles::LicenseState state;
std::string license;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp b/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
index 051a968..32cf2dc 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
@@ -24,11 +24,13 @@
}
bool MemoryFileSystem::FileExists(const std::string& fileName) const {
+ std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(fileName);
return result != mMemoryFileSystem.end();
}
ssize_t MemoryFileSystem::GetFileSize(const std::string& fileName) const {
+ std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(fileName);
if (result != mMemoryFileSystem.end()) {
return static_cast<ssize_t>(result->second.getFileSize());
@@ -40,6 +42,7 @@
std::vector<std::string> MemoryFileSystem::ListFiles() const {
std::vector<std::string> list;
+ std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
for (const auto& filename : mMemoryFileSystem) {
list.push_back(filename.first);
}
@@ -48,6 +51,7 @@
size_t MemoryFileSystem::Read(const std::string& path, std::string* buffer) {
std::string key = GetFileName(path);
+ std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(key);
if (result != mMemoryFileSystem.end()) {
std::string serializedHashFile = result->second.getContent();
@@ -61,6 +65,7 @@
size_t MemoryFileSystem::Write(const std::string& path, const MemoryFile& memoryFile) {
std::string key = GetFileName(path);
+ std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(key);
if (result != mMemoryFileSystem.end()) {
mMemoryFileSystem.erase(key);
@@ -70,6 +75,7 @@
}
bool MemoryFileSystem::RemoveFile(const std::string& fileName) {
+ std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(fileName);
if (result != mMemoryFileSystem.end()) {
mMemoryFileSystem.erase(result);
@@ -81,6 +87,7 @@
}
bool MemoryFileSystem::RemoveAllFiles() {
+ std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
mMemoryFileSystem.clear();
return mMemoryFileSystem.empty();
}
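
Note: the shape of the change above, reduced to a generic sketch — every reader and writer of the in-memory map takes the same mutex, so concurrent store/read/remove calls cannot race on it:

    #include <map>
    #include <mutex>
    #include <string>

    class GuardedStore {
      public:
        void Put(const std::string& name, const std::string& content) {
            std::lock_guard<std::mutex> lock(mLock);
            mFiles[name] = content;
        }

        bool Get(const std::string& name, std::string* out) const {
            std::lock_guard<std::mutex> lock(mLock);
            auto it = mFiles.find(name);
            if (it == mFiles.end()) return false;
            *out = it->second;
            return true;
        }

      private:
        mutable std::mutex mLock;  // mutable so Get() can stay const
        std::map<std::string, std::string> mFiles;
    };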
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/CryptoPlugin.h b/drm/mediadrm/plugins/clearkey/hidl/include/CryptoPlugin.h
index 8680f0c..23a64fa 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/CryptoPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/CryptoPlugin.h
@@ -20,6 +20,8 @@
#include <android/hardware/drm/1.2/ICryptoPlugin.h>
#include <android/hidl/memory/1.0/IMemory.h>
+#include <mutex>
+
#include "ClearKeyTypes.h"
#include "Session.h"
#include "Utils.h"
@@ -93,7 +95,7 @@
const SharedBuffer& source,
uint64_t offset,
const DestinationBuffer& destination,
- decrypt_1_2_cb _hidl_cb);
+ decrypt_1_2_cb _hidl_cb) NO_THREAD_SAFETY_ANALYSIS; // use unique_lock
Return<void> setSharedBufferBase(const hidl_memory& base,
uint32_t bufferId);
@@ -105,7 +107,8 @@
private:
CLEARKEY_DISALLOW_COPY_AND_ASSIGN(CryptoPlugin);
- std::map<uint32_t, sp<IMemory> > mSharedBufferMap;
+ std::mutex mSharedBufferLock;
+ std::map<uint32_t, sp<IMemory>> mSharedBufferMap GUARDED_BY(mSharedBufferLock);
sp<Session> mSession;
Status mInitStatus;
};
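
Note: GUARDED_BY pairs with the -Wthread-safety flag added to the clearkey Android.bp above, so Clang can flag unlocked accesses at compile time. A self-contained sketch (the macro normally comes from <android-base/thread_annotations.h>):

    #include <mutex>

    #ifndef GUARDED_BY
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))
    #endif

    class Counter {
      public:
        void increment() {
            std::lock_guard<std::mutex> lock(mLock);
            ++mValue;  // OK: mLock is held here
        }

        // int unsafeRead() { return mValue; }  // -Wthread-safety would warn: mLock not held

      private:
        std::mutex mLock;
        int mValue GUARDED_BY(mLock) = 0;
    };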
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
index 076beb8..894985b 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
@@ -416,7 +416,8 @@
mMockError = Status_V1_2::OK;
}
- DeviceFiles mFileHandle;
+ DeviceFiles mFileHandle GUARDED_BY(mFileHandleLock);
+ Mutex mFileHandleLock;
Mutex mSecureStopLock;
CLEARKEY_DISALLOW_COPY_AND_ASSIGN_AND_NEW(DrmPlugin);
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h b/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
index bcd9fd6..6ac0e2c 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
@@ -5,7 +5,9 @@
#ifndef CLEARKEY_MEMORY_FILE_SYSTEM_H_
#define CLEARKEY_MEMORY_FILE_SYSTEM_H_
+#include <android-base/thread_annotations.h>
#include <map>
+#include <mutex>
#include <string>
#include "ClearKeyTypes.h"
@@ -49,10 +51,12 @@
size_t Write(const std::string& pathName, const MemoryFile& memoryFile);
private:
+ mutable std::mutex mMemoryFileSystemLock;
+
// License file name is made up of a unique keySetId, therefore,
// the filename can be used as the key to locate licenses in the
// memory file system.
- std::map<std::string, MemoryFile> mMemoryFileSystem;
+ std::map<std::string, MemoryFile> mMemoryFileSystem GUARDED_BY(mMemoryFileSystemLock);
std::string GetFileName(const std::string& path);
diff --git a/media/codec2/components/aac/C2SoftAacEnc.cpp b/media/codec2/components/aac/C2SoftAacEnc.cpp
index ea76cbb..d865ab2 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.cpp
+++ b/media/codec2/components/aac/C2SoftAacEnc.cpp
@@ -272,8 +272,9 @@
return UNKNOWN_ERROR;
}
- if (sbrMode != -1 && aacProfile == C2Config::PROFILE_AAC_ELD) {
- if (AACENC_OK != aacEncoder_SetParam(mAACEncoder, AACENC_SBR_MODE, sbrMode)) {
+ if (sbrMode != C2Config::AAC_SBR_AUTO && aacProfile == C2Config::PROFILE_AAC_ELD) {
+ int aacSbrMode = sbrMode != C2Config::AAC_SBR_OFF;
+ if (AACENC_OK != aacEncoder_SetParam(mAACEncoder, AACENC_SBR_MODE, aacSbrMode)) {
ALOGE("Failed to set AAC encoder parameters");
return UNKNOWN_ERROR;
}
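
Note: the fix above stops passing the C2Config SBR enum straight into AACENC_SBR_MODE, which only takes an on/off value (with "auto" meaning the parameter is left alone). The mapping, with illustrative stand-in enum values rather than the real C2Config constants:

    enum class SbrMode { kOff, kSingleRate, kDualRate, kAuto };

    // Returns -1 when AACENC_SBR_MODE should be left at the encoder default ("auto"),
    // otherwise 0 to disable SBR or 1 to enable it.
    int toAacEncSbrParam(SbrMode mode) {
        if (mode == SbrMode::kAuto) return -1;
        return (mode == SbrMode::kOff) ? 0 : 1;
    }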
diff --git a/media/codec2/components/avc/C2SoftAvcDec.cpp b/media/codec2/components/avc/C2SoftAvcDec.cpp
index 0207311..e8287f9 100644
--- a/media/codec2/components/avc/C2SoftAvcDec.cpp
+++ b/media/codec2/components/avc/C2SoftAvcDec.cpp
@@ -26,7 +26,6 @@
#include <SimpleC2Interface.h>
#include "C2SoftAvcDec.h"
-#include "ih264d.h"
namespace android {
@@ -391,12 +390,14 @@
}
while (true) {
- ivd_video_decode_ip_t s_decode_ip;
- ivd_video_decode_op_t s_decode_op;
+ ih264d_video_decode_ip_t s_h264d_decode_ip = {};
+ ih264d_video_decode_op_t s_h264d_decode_op = {};
+ ivd_video_decode_ip_t *ps_decode_ip = &s_h264d_decode_ip.s_ivd_video_decode_ip_t;
+ ivd_video_decode_op_t *ps_decode_op = &s_h264d_decode_op.s_ivd_video_decode_op_t;
- setDecodeArgs(&s_decode_ip, &s_decode_op, nullptr, nullptr, 0, 0, 0);
- (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
- if (0 == s_decode_op.u4_output_present) {
+ setDecodeArgs(ps_decode_ip, ps_decode_op, nullptr, nullptr, 0, 0, 0);
+ (void) ivdec_api_function(mDecHandle, &s_h264d_decode_ip, &s_h264d_decode_op);
+ if (0 == ps_decode_op->u4_output_present) {
resetPlugin();
break;
}
@@ -411,8 +412,8 @@
}
status_t C2SoftAvcDec::createDecoder() {
- ivdext_create_ip_t s_create_ip;
- ivdext_create_op_t s_create_op;
+ ivdext_create_ip_t s_create_ip = {};
+ ivdext_create_op_t s_create_op = {};
s_create_ip.s_ivd_create_ip_t.u4_size = sizeof(ivdext_create_ip_t);
s_create_ip.s_ivd_create_ip_t.e_cmd = IVD_CMD_CREATE;
@@ -438,8 +439,8 @@
}
status_t C2SoftAvcDec::setNumCores() {
- ivdext_ctl_set_num_cores_ip_t s_set_num_cores_ip;
- ivdext_ctl_set_num_cores_op_t s_set_num_cores_op;
+ ivdext_ctl_set_num_cores_ip_t s_set_num_cores_ip = {};
+ ivdext_ctl_set_num_cores_op_t s_set_num_cores_op = {};
s_set_num_cores_ip.u4_size = sizeof(ivdext_ctl_set_num_cores_ip_t);
s_set_num_cores_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -458,22 +459,26 @@
}
status_t C2SoftAvcDec::setParams(size_t stride, IVD_VIDEO_DECODE_MODE_T dec_mode) {
- ivd_ctl_set_config_ip_t s_set_dyn_params_ip;
- ivd_ctl_set_config_op_t s_set_dyn_params_op;
+ ih264d_ctl_set_config_ip_t s_h264d_set_dyn_params_ip = {};
+ ih264d_ctl_set_config_op_t s_h264d_set_dyn_params_op = {};
+ ivd_ctl_set_config_ip_t *ps_set_dyn_params_ip =
+ &s_h264d_set_dyn_params_ip.s_ivd_ctl_set_config_ip_t;
+ ivd_ctl_set_config_op_t *ps_set_dyn_params_op =
+ &s_h264d_set_dyn_params_op.s_ivd_ctl_set_config_op_t;
- s_set_dyn_params_ip.u4_size = sizeof(ivd_ctl_set_config_ip_t);
- s_set_dyn_params_ip.e_cmd = IVD_CMD_VIDEO_CTL;
- s_set_dyn_params_ip.e_sub_cmd = IVD_CMD_CTL_SETPARAMS;
- s_set_dyn_params_ip.u4_disp_wd = (UWORD32) stride;
- s_set_dyn_params_ip.e_frm_skip_mode = IVD_SKIP_NONE;
- s_set_dyn_params_ip.e_frm_out_mode = IVD_DISPLAY_FRAME_OUT;
- s_set_dyn_params_ip.e_vid_dec_mode = dec_mode;
- s_set_dyn_params_op.u4_size = sizeof(ivd_ctl_set_config_op_t);
+ ps_set_dyn_params_ip->u4_size = sizeof(ih264d_ctl_set_config_ip_t);
+ ps_set_dyn_params_ip->e_cmd = IVD_CMD_VIDEO_CTL;
+ ps_set_dyn_params_ip->e_sub_cmd = IVD_CMD_CTL_SETPARAMS;
+ ps_set_dyn_params_ip->u4_disp_wd = (UWORD32) stride;
+ ps_set_dyn_params_ip->e_frm_skip_mode = IVD_SKIP_NONE;
+ ps_set_dyn_params_ip->e_frm_out_mode = IVD_DISPLAY_FRAME_OUT;
+ ps_set_dyn_params_ip->e_vid_dec_mode = dec_mode;
+ ps_set_dyn_params_op->u4_size = sizeof(ih264d_ctl_set_config_op_t);
IV_API_CALL_STATUS_T status = ivdec_api_function(mDecHandle,
- &s_set_dyn_params_ip,
- &s_set_dyn_params_op);
+ &s_h264d_set_dyn_params_ip,
+ &s_h264d_set_dyn_params_op);
if (status != IV_SUCCESS) {
- ALOGE("error in %s: 0x%x", __func__, s_set_dyn_params_op.u4_error_code);
+ ALOGE("error in %s: 0x%x", __func__, ps_set_dyn_params_op->u4_error_code);
return UNKNOWN_ERROR;
}
@@ -481,8 +486,8 @@
}
void C2SoftAvcDec::getVersion() {
- ivd_ctl_getversioninfo_ip_t s_get_versioninfo_ip;
- ivd_ctl_getversioninfo_op_t s_get_versioninfo_op;
+ ivd_ctl_getversioninfo_ip_t s_get_versioninfo_ip = {};
+ ivd_ctl_getversioninfo_op_t s_get_versioninfo_op = {};
UWORD8 au1_buf[512];
s_get_versioninfo_ip.u4_size = sizeof(ivd_ctl_getversioninfo_ip_t);
@@ -538,7 +543,7 @@
if (OK != setParams(mStride, IVD_DECODE_FRAME)) return false;
}
- ps_decode_ip->u4_size = sizeof(ivd_video_decode_ip_t);
+ ps_decode_ip->u4_size = sizeof(ih264d_video_decode_ip_t);
ps_decode_ip->e_cmd = IVD_CMD_VIDEO_DECODE;
if (inBuffer) {
ps_decode_ip->u4_ts = tsMarker;
@@ -567,14 +572,14 @@
ps_decode_ip->s_out_buffer.pu1_bufs[2] = mOutBufferFlush + lumaSize + chromaSize;
}
ps_decode_ip->s_out_buffer.u4_num_bufs = 3;
- ps_decode_op->u4_size = sizeof(ivd_video_decode_op_t);
+ ps_decode_op->u4_size = sizeof(ih264d_video_decode_op_t);
return true;
}
bool C2SoftAvcDec::getVuiParams() {
- ivdext_ctl_get_vui_params_ip_t s_get_vui_params_ip;
- ivdext_ctl_get_vui_params_op_t s_get_vui_params_op;
+ ivdext_ctl_get_vui_params_ip_t s_get_vui_params_ip = {};
+ ivdext_ctl_get_vui_params_op_t s_get_vui_params_op = {};
s_get_vui_params_ip.u4_size = sizeof(ivdext_ctl_get_vui_params_ip_t);
s_get_vui_params_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -622,8 +627,8 @@
}
status_t C2SoftAvcDec::setFlushMode() {
- ivd_ctl_flush_ip_t s_set_flush_ip;
- ivd_ctl_flush_op_t s_set_flush_op;
+ ivd_ctl_flush_ip_t s_set_flush_ip = {};
+ ivd_ctl_flush_op_t s_set_flush_op = {};
s_set_flush_ip.u4_size = sizeof(ivd_ctl_flush_ip_t);
s_set_flush_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -641,8 +646,8 @@
}
status_t C2SoftAvcDec::resetDecoder() {
- ivd_ctl_reset_ip_t s_reset_ip;
- ivd_ctl_reset_op_t s_reset_op;
+ ivd_ctl_reset_ip_t s_reset_ip = {};
+ ivd_ctl_reset_op_t s_reset_op = {};
s_reset_ip.u4_size = sizeof(ivd_ctl_reset_ip_t);
s_reset_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -671,8 +676,8 @@
status_t C2SoftAvcDec::deleteDecoder() {
if (mDecHandle) {
- ivdext_delete_ip_t s_delete_ip;
- ivdext_delete_op_t s_delete_op;
+ ivdext_delete_ip_t s_delete_ip = {};
+ ivdext_delete_op_t s_delete_op = {};
s_delete_ip.s_ivd_delete_ip_t.u4_size = sizeof(ivdext_delete_ip_t);
s_delete_ip.s_ivd_delete_ip_t.e_cmd = IVD_CMD_DELETE;
@@ -837,8 +842,10 @@
return;
}
- ivd_video_decode_ip_t s_decode_ip;
- ivd_video_decode_op_t s_decode_op;
+ ih264d_video_decode_ip_t s_h264d_decode_ip = {};
+ ih264d_video_decode_op_t s_h264d_decode_op = {};
+ ivd_video_decode_ip_t *ps_decode_ip = &s_h264d_decode_ip.s_ivd_video_decode_ip_t;
+ ivd_video_decode_op_t *ps_decode_op = &s_h264d_decode_op.s_ivd_video_decode_op_t;
{
C2GraphicView wView = mOutBlock->map().get();
if (wView.error()) {
@@ -846,7 +853,7 @@
work->result = wView.error();
return;
}
- if (!setDecodeArgs(&s_decode_ip, &s_decode_op, &rView, &wView,
+ if (!setDecodeArgs(ps_decode_ip, ps_decode_op, &rView, &wView,
inOffset + inPos, inSize - inPos, workIndex)) {
mSignalledError = true;
work->workletsProcessed = 1u;
@@ -862,26 +869,27 @@
WORD32 delay;
GETTIME(&mTimeStart, nullptr);
TIME_DIFF(mTimeEnd, mTimeStart, delay);
- (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
+ (void) ivdec_api_function(mDecHandle, &s_h264d_decode_ip, &s_h264d_decode_op);
WORD32 decodeTime;
GETTIME(&mTimeEnd, nullptr);
TIME_DIFF(mTimeStart, mTimeEnd, decodeTime);
ALOGV("decodeTime=%6d delay=%6d numBytes=%6d", decodeTime, delay,
- s_decode_op.u4_num_bytes_consumed);
+ ps_decode_op->u4_num_bytes_consumed);
}
- if (IVD_MEM_ALLOC_FAILED == (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+ if (IVD_MEM_ALLOC_FAILED == (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
ALOGE("allocation failure in decoder");
mSignalledError = true;
work->workletsProcessed = 1u;
work->result = C2_CORRUPTED;
return;
- } else if (IVD_STREAM_WIDTH_HEIGHT_NOT_SUPPORTED == (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+ } else if (IVD_STREAM_WIDTH_HEIGHT_NOT_SUPPORTED ==
+ (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
ALOGE("unsupported resolution : %dx%d", mWidth, mHeight);
mSignalledError = true;
work->workletsProcessed = 1u;
work->result = C2_CORRUPTED;
return;
- } else if (IVD_RES_CHANGED == (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+ } else if (IVD_RES_CHANGED == (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
ALOGV("resolution changed");
drainInternal(DRAIN_COMPONENT_NO_EOS, pool, work);
resetDecoder();
@@ -890,16 +898,16 @@
/* Decode header and get new dimensions */
setParams(mStride, IVD_DECODE_HEADER);
- (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
- } else if (IS_IVD_FATAL_ERROR(s_decode_op.u4_error_code)) {
- ALOGE("Fatal error in decoder 0x%x", s_decode_op.u4_error_code);
+ (void) ivdec_api_function(mDecHandle, ps_decode_ip, ps_decode_op);
+ } else if (IS_IVD_FATAL_ERROR(ps_decode_op->u4_error_code)) {
+ ALOGE("Fatal error in decoder 0x%x", ps_decode_op->u4_error_code);
mSignalledError = true;
work->workletsProcessed = 1u;
work->result = C2_CORRUPTED;
return;
}
- if (s_decode_op.i4_reorder_depth >= 0 && mOutputDelay != s_decode_op.i4_reorder_depth) {
- mOutputDelay = s_decode_op.i4_reorder_depth;
+ if (ps_decode_op->i4_reorder_depth >= 0 && mOutputDelay != ps_decode_op->i4_reorder_depth) {
+ mOutputDelay = ps_decode_op->i4_reorder_depth;
ALOGV("New Output delay %d ", mOutputDelay);
C2PortActualDelayTuning::output outputDelay(mOutputDelay);
@@ -917,16 +925,16 @@
return;
}
}
- if (0 < s_decode_op.u4_pic_wd && 0 < s_decode_op.u4_pic_ht) {
+ if (0 < ps_decode_op->u4_pic_wd && 0 < ps_decode_op->u4_pic_ht) {
if (mHeaderDecoded == false) {
mHeaderDecoded = true;
- mStride = ALIGN32(s_decode_op.u4_pic_wd);
+ mStride = ALIGN32(ps_decode_op->u4_pic_wd);
setParams(mStride, IVD_DECODE_FRAME);
}
- if (s_decode_op.u4_pic_wd != mWidth || s_decode_op.u4_pic_ht != mHeight) {
- mWidth = s_decode_op.u4_pic_wd;
- mHeight = s_decode_op.u4_pic_ht;
- CHECK_EQ(0u, s_decode_op.u4_output_present);
+ if (ps_decode_op->u4_pic_wd != mWidth || ps_decode_op->u4_pic_ht != mHeight) {
+ mWidth = ps_decode_op->u4_pic_wd;
+ mHeight = ps_decode_op->u4_pic_ht;
+ CHECK_EQ(0u, ps_decode_op->u4_output_present);
C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
@@ -945,11 +953,11 @@
}
}
(void)getVuiParams();
- hasPicture |= (1 == s_decode_op.u4_frame_decoded_flag);
- if (s_decode_op.u4_output_present) {
- finishWork(s_decode_op.u4_ts, work);
+ hasPicture |= (1 == ps_decode_op->u4_frame_decoded_flag);
+ if (ps_decode_op->u4_output_present) {
+ finishWork(ps_decode_op->u4_ts, work);
}
- inPos += s_decode_op.u4_num_bytes_consumed;
+ inPos += ps_decode_op->u4_num_bytes_consumed;
}
if (eos) {
drainInternal(DRAIN_COMPONENT_WITH_EOS, pool, work);
@@ -987,16 +995,18 @@
ALOGE("graphic view map failed %d", wView.error());
return C2_CORRUPTED;
}
- ivd_video_decode_ip_t s_decode_ip;
- ivd_video_decode_op_t s_decode_op;
- if (!setDecodeArgs(&s_decode_ip, &s_decode_op, nullptr, &wView, 0, 0, 0)) {
+ ih264d_video_decode_ip_t s_h264d_decode_ip = {};
+ ih264d_video_decode_op_t s_h264d_decode_op = {};
+ ivd_video_decode_ip_t *ps_decode_ip = &s_h264d_decode_ip.s_ivd_video_decode_ip_t;
+ ivd_video_decode_op_t *ps_decode_op = &s_h264d_decode_op.s_ivd_video_decode_op_t;
+ if (!setDecodeArgs(ps_decode_ip, ps_decode_op, nullptr, &wView, 0, 0, 0)) {
mSignalledError = true;
work->workletsProcessed = 1u;
return C2_CORRUPTED;
}
- (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
- if (s_decode_op.u4_output_present) {
- finishWork(s_decode_op.u4_ts, work);
+ (void) ivdec_api_function(mDecHandle, &s_h264d_decode_ip, &s_h264d_decode_op);
+ if (ps_decode_op->u4_output_present) {
+ finishWork(ps_decode_op->u4_ts, work);
} else {
fillEmptyWork(work);
break;
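
Note: a recurring element of this file's changes is value-initializing the decoder's input/output structs with = {} before they cross the C API, so any field the caller does not set explicitly is zero rather than stack garbage. In isolation (with a stand-in struct, not the real ivd/ih264d types):

    #include <cstdint>

    struct decode_ip_t {            // stand-in for the decoder input struct
        uint32_t u4_size;
        uint32_t u4_ts;
        const void* pv_stream_buffer;
        uint32_t u4_num_Bytes;
    };

    decode_ip_t prepareInput(const void* stream, uint32_t numBytes) {
        decode_ip_t ip = {};                // value-initialized: every member starts at zero
        ip.u4_size = sizeof(decode_ip_t);   // then set only the fields we care about
        ip.pv_stream_buffer = stream;
        ip.u4_num_Bytes = numBytes;
        return ip;
    }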
diff --git a/media/codec2/components/avc/C2SoftAvcDec.h b/media/codec2/components/avc/C2SoftAvcDec.h
index bd84de0..5c07d29 100644
--- a/media/codec2/components/avc/C2SoftAvcDec.h
+++ b/media/codec2/components/avc/C2SoftAvcDec.h
@@ -25,8 +25,7 @@
#include <SimpleC2Component.h>
#include "ih264_typedefs.h"
-#include "iv.h"
-#include "ivd.h"
+#include "ih264d.h"
namespace android {
diff --git a/media/codec2/components/avc/C2SoftAvcEnc.cpp b/media/codec2/components/avc/C2SoftAvcEnc.cpp
index bf9e5ff..bab651f 100644
--- a/media/codec2/components/avc/C2SoftAvcEnc.cpp
+++ b/media/codec2/components/avc/C2SoftAvcEnc.cpp
@@ -1082,29 +1082,31 @@
/* Getting MemRecords Attributes */
{
- iv_fill_mem_rec_ip_t s_fill_mem_rec_ip;
- iv_fill_mem_rec_op_t s_fill_mem_rec_op;
+ ih264e_fill_mem_rec_ip_t s_ih264e_mem_rec_ip = {};
+ ih264e_fill_mem_rec_op_t s_ih264e_mem_rec_op = {};
+ iv_fill_mem_rec_ip_t *ps_fill_mem_rec_ip = &s_ih264e_mem_rec_ip.s_ive_ip;
+ iv_fill_mem_rec_op_t *ps_fill_mem_rec_op = &s_ih264e_mem_rec_op.s_ive_op;
- s_fill_mem_rec_ip.u4_size = sizeof(iv_fill_mem_rec_ip_t);
- s_fill_mem_rec_op.u4_size = sizeof(iv_fill_mem_rec_op_t);
+ ps_fill_mem_rec_ip->u4_size = sizeof(ih264e_fill_mem_rec_ip_t);
+ ps_fill_mem_rec_op->u4_size = sizeof(ih264e_fill_mem_rec_op_t);
- s_fill_mem_rec_ip.e_cmd = IV_CMD_FILL_NUM_MEM_REC;
- s_fill_mem_rec_ip.ps_mem_rec = mMemRecords;
- s_fill_mem_rec_ip.u4_num_mem_rec = mNumMemRecords;
- s_fill_mem_rec_ip.u4_max_wd = width;
- s_fill_mem_rec_ip.u4_max_ht = height;
- s_fill_mem_rec_ip.u4_max_level = mAVCEncLevel;
- s_fill_mem_rec_ip.e_color_format = DEFAULT_INP_COLOR_FORMAT;
- s_fill_mem_rec_ip.u4_max_ref_cnt = DEFAULT_MAX_REF_FRM;
- s_fill_mem_rec_ip.u4_max_reorder_cnt = DEFAULT_MAX_REORDER_FRM;
- s_fill_mem_rec_ip.u4_max_srch_rng_x = DEFAULT_MAX_SRCH_RANGE_X;
- s_fill_mem_rec_ip.u4_max_srch_rng_y = DEFAULT_MAX_SRCH_RANGE_Y;
+ ps_fill_mem_rec_ip->e_cmd = IV_CMD_FILL_NUM_MEM_REC;
+ ps_fill_mem_rec_ip->ps_mem_rec = mMemRecords;
+ ps_fill_mem_rec_ip->u4_num_mem_rec = mNumMemRecords;
+ ps_fill_mem_rec_ip->u4_max_wd = width;
+ ps_fill_mem_rec_ip->u4_max_ht = height;
+ ps_fill_mem_rec_ip->u4_max_level = mAVCEncLevel;
+ ps_fill_mem_rec_ip->e_color_format = DEFAULT_INP_COLOR_FORMAT;
+ ps_fill_mem_rec_ip->u4_max_ref_cnt = DEFAULT_MAX_REF_FRM;
+ ps_fill_mem_rec_ip->u4_max_reorder_cnt = DEFAULT_MAX_REORDER_FRM;
+ ps_fill_mem_rec_ip->u4_max_srch_rng_x = DEFAULT_MAX_SRCH_RANGE_X;
+ ps_fill_mem_rec_ip->u4_max_srch_rng_y = DEFAULT_MAX_SRCH_RANGE_Y;
- status = ive_api_function(nullptr, &s_fill_mem_rec_ip, &s_fill_mem_rec_op);
+ status = ive_api_function(nullptr, &s_ih264e_mem_rec_ip, &s_ih264e_mem_rec_op);
if (status != IV_SUCCESS) {
ALOGE("Fill memory records failed = 0x%x\n",
- s_fill_mem_rec_op.u4_error_code);
+ ps_fill_mem_rec_op->u4_error_code);
return C2_CORRUPTED;
}
}
@@ -1133,48 +1135,51 @@
/* Codec Instance Creation */
{
- ive_init_ip_t s_init_ip;
- ive_init_op_t s_init_op;
+ ih264e_init_ip_t s_enc_ip = {};
+ ih264e_init_op_t s_enc_op = {};
+
+ ive_init_ip_t *ps_init_ip = &s_enc_ip.s_ive_ip;
+ ive_init_op_t *ps_init_op = &s_enc_op.s_ive_op;
mCodecCtx = (iv_obj_t *)mMemRecords[0].pv_base;
mCodecCtx->u4_size = sizeof(iv_obj_t);
mCodecCtx->pv_fxns = (void *)ive_api_function;
- s_init_ip.u4_size = sizeof(ive_init_ip_t);
- s_init_op.u4_size = sizeof(ive_init_op_t);
+ ps_init_ip->u4_size = sizeof(ih264e_init_ip_t);
+ ps_init_op->u4_size = sizeof(ih264e_init_op_t);
- s_init_ip.e_cmd = IV_CMD_INIT;
- s_init_ip.u4_num_mem_rec = mNumMemRecords;
- s_init_ip.ps_mem_rec = mMemRecords;
- s_init_ip.u4_max_wd = width;
- s_init_ip.u4_max_ht = height;
- s_init_ip.u4_max_ref_cnt = DEFAULT_MAX_REF_FRM;
- s_init_ip.u4_max_reorder_cnt = DEFAULT_MAX_REORDER_FRM;
- s_init_ip.u4_max_level = mAVCEncLevel;
- s_init_ip.e_inp_color_fmt = mIvVideoColorFormat;
+ ps_init_ip->e_cmd = IV_CMD_INIT;
+ ps_init_ip->u4_num_mem_rec = mNumMemRecords;
+ ps_init_ip->ps_mem_rec = mMemRecords;
+ ps_init_ip->u4_max_wd = width;
+ ps_init_ip->u4_max_ht = height;
+ ps_init_ip->u4_max_ref_cnt = DEFAULT_MAX_REF_FRM;
+ ps_init_ip->u4_max_reorder_cnt = DEFAULT_MAX_REORDER_FRM;
+ ps_init_ip->u4_max_level = mAVCEncLevel;
+ ps_init_ip->e_inp_color_fmt = mIvVideoColorFormat;
if (mReconEnable || mPSNREnable) {
- s_init_ip.u4_enable_recon = 1;
+ ps_init_ip->u4_enable_recon = 1;
} else {
- s_init_ip.u4_enable_recon = 0;
+ ps_init_ip->u4_enable_recon = 0;
}
- s_init_ip.e_recon_color_fmt = DEFAULT_RECON_COLOR_FORMAT;
- s_init_ip.e_rc_mode = DEFAULT_RC_MODE;
- s_init_ip.u4_max_framerate = DEFAULT_MAX_FRAMERATE;
- s_init_ip.u4_max_bitrate = DEFAULT_MAX_BITRATE;
- s_init_ip.u4_num_bframes = mBframes;
- s_init_ip.e_content_type = IV_PROGRESSIVE;
- s_init_ip.u4_max_srch_rng_x = DEFAULT_MAX_SRCH_RANGE_X;
- s_init_ip.u4_max_srch_rng_y = DEFAULT_MAX_SRCH_RANGE_Y;
- s_init_ip.e_slice_mode = mSliceMode;
- s_init_ip.u4_slice_param = mSliceParam;
- s_init_ip.e_arch = mArch;
- s_init_ip.e_soc = DEFAULT_SOC;
+ ps_init_ip->e_recon_color_fmt = DEFAULT_RECON_COLOR_FORMAT;
+ ps_init_ip->e_rc_mode = DEFAULT_RC_MODE;
+ ps_init_ip->u4_max_framerate = DEFAULT_MAX_FRAMERATE;
+ ps_init_ip->u4_max_bitrate = DEFAULT_MAX_BITRATE;
+ ps_init_ip->u4_num_bframes = mBframes;
+ ps_init_ip->e_content_type = IV_PROGRESSIVE;
+ ps_init_ip->u4_max_srch_rng_x = DEFAULT_MAX_SRCH_RANGE_X;
+ ps_init_ip->u4_max_srch_rng_y = DEFAULT_MAX_SRCH_RANGE_Y;
+ ps_init_ip->e_slice_mode = mSliceMode;
+ ps_init_ip->u4_slice_param = mSliceParam;
+ ps_init_ip->e_arch = mArch;
+ ps_init_ip->e_soc = DEFAULT_SOC;
- status = ive_api_function(mCodecCtx, &s_init_ip, &s_init_op);
+ status = ive_api_function(mCodecCtx, &s_enc_ip, &s_enc_op);
if (status != IV_SUCCESS) {
- ALOGE("Init encoder failed = 0x%x\n", s_init_op.u4_error_code);
+ ALOGE("Init encoder failed = 0x%x\n", ps_init_op->u4_error_code);
return C2_CORRUPTED;
}
}
@@ -1502,15 +1507,17 @@
}
// while (!mSawOutputEOS && !outQueue.empty()) {
c2_status_t error;
- ive_video_encode_ip_t s_encode_ip;
- ive_video_encode_op_t s_encode_op;
- memset(&s_encode_op, 0, sizeof(s_encode_op));
+ ih264e_video_encode_ip_t s_video_encode_ip = {};
+ ih264e_video_encode_op_t s_video_encode_op = {};
+ ive_video_encode_ip_t *ps_encode_ip = &s_video_encode_ip.s_ive_ip;
+ ive_video_encode_op_t *ps_encode_op = &s_video_encode_op.s_ive_op;
+ memset(ps_encode_op, 0, sizeof(*ps_encode_op));
if (!mSpsPpsHeaderReceived) {
constexpr uint32_t kHeaderLength = MIN_STREAM_SIZE;
uint8_t header[kHeaderLength];
error = setEncodeArgs(
- &s_encode_ip, &s_encode_op, nullptr, header, kHeaderLength, workIndex);
+ ps_encode_ip, ps_encode_op, nullptr, header, kHeaderLength, workIndex);
if (error != C2_OK) {
ALOGE("setEncodeArgs failed: %d", error);
mSignalledError = true;
@@ -1518,22 +1525,22 @@
work->workletsProcessed = 1u;
return;
}
- status = ive_api_function(mCodecCtx, &s_encode_ip, &s_encode_op);
+ status = ive_api_function(mCodecCtx, ps_encode_ip, ps_encode_op);
if (IV_SUCCESS != status) {
ALOGE("Encode header failed = 0x%x\n",
- s_encode_op.u4_error_code);
+ ps_encode_op->u4_error_code);
work->workletsProcessed = 1u;
return;
} else {
ALOGV("Bytes Generated in header %d\n",
- s_encode_op.s_out_buf.u4_bytes);
+ ps_encode_op->s_out_buf.u4_bytes);
}
mSpsPpsHeaderReceived = true;
std::unique_ptr<C2StreamInitDataInfo::output> csd =
- C2StreamInitDataInfo::output::AllocUnique(s_encode_op.s_out_buf.u4_bytes, 0u);
+ C2StreamInitDataInfo::output::AllocUnique(ps_encode_op->s_out_buf.u4_bytes, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
mSignalledError = true;
@@ -1541,7 +1548,7 @@
work->workletsProcessed = 1u;
return;
}
- memcpy(csd->m.value, header, s_encode_op.s_out_buf.u4_bytes);
+ memcpy(csd->m.value, header, ps_encode_op->s_out_buf.u4_bytes);
work->worklets.front()->output.configUpdate.push_back(std::move(csd));
DUMP_TO_FILE(
@@ -1635,7 +1642,7 @@
}
error = setEncodeArgs(
- &s_encode_ip, &s_encode_op, view.get(), wView.base(), wView.capacity(), workIndex);
+ ps_encode_ip, ps_encode_op, view.get(), wView.base(), wView.capacity(), workIndex);
if (error != C2_OK) {
ALOGE("setEncodeArgs failed : %d", error);
mSignalledError = true;
@@ -1652,17 +1659,17 @@
/* Compute time elapsed between end of previous decode()
* to start of current decode() */
TIME_DIFF(mTimeEnd, mTimeStart, timeDelay);
- status = ive_api_function(mCodecCtx, &s_encode_ip, &s_encode_op);
+ status = ive_api_function(mCodecCtx, &s_video_encode_ip, &s_video_encode_op);
if (IV_SUCCESS != status) {
- if ((s_encode_op.u4_error_code & 0xFF) == IH264E_BITSTREAM_BUFFER_OVERFLOW) {
+ if ((ps_encode_op->u4_error_code & 0xFF) == IH264E_BITSTREAM_BUFFER_OVERFLOW) {
// TODO: use IVE_CMD_CTL_GETBUFINFO for proper max input size?
mOutBufferSize *= 2;
mOutBlock.reset();
continue;
}
ALOGE("Encode Frame failed = 0x%x\n",
- s_encode_op.u4_error_code);
+ ps_encode_op->u4_error_code);
mSignalledError = true;
work->result = C2_CORRUPTED;
work->workletsProcessed = 1u;
@@ -1672,7 +1679,7 @@
// Hold input buffer reference
if (inputBuffer) {
- mBuffers[s_encode_ip.s_inp_buf.apv_bufs[0]] = inputBuffer;
+ mBuffers[ps_encode_ip->s_inp_buf.apv_bufs[0]] = inputBuffer;
}
GETTIME(&mTimeEnd, nullptr);
@@ -1680,9 +1687,9 @@
TIME_DIFF(mTimeStart, mTimeEnd, timeTaken);
ALOGV("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay,
- s_encode_op.s_out_buf.u4_bytes);
+ ps_encode_op->s_out_buf.u4_bytes);
- void *freed = s_encode_op.s_inp_buf.apv_bufs[0];
+ void *freed = ps_encode_op->s_inp_buf.apv_bufs[0];
/* If encoder frees up an input buffer, mark it as free */
if (freed != nullptr) {
if (mBuffers.count(freed) == 0u) {
@@ -1694,17 +1701,17 @@
}
}
- if (s_encode_op.output_present) {
- if (!s_encode_op.s_out_buf.u4_bytes) {
+ if (ps_encode_op->output_present) {
+ if (!ps_encode_op->s_out_buf.u4_bytes) {
ALOGE("Error: Output present but bytes generated is zero");
mSignalledError = true;
work->result = C2_CORRUPTED;
work->workletsProcessed = 1u;
return;
}
- uint64_t workId = ((uint64_t)s_encode_op.u4_timestamp_high << 32) |
- s_encode_op.u4_timestamp_low;
- finishWork(workId, work, &s_encode_op);
+ uint64_t workId = ((uint64_t)ps_encode_op->u4_timestamp_high << 32) |
+ ps_encode_op->u4_timestamp_low;
+ finishWork(workId, work, ps_encode_op);
}
if (mSawInputEOS) {
drainInternal(DRAIN_COMPONENT_WITH_EOS, pool, work);
@@ -1744,9 +1751,11 @@
ALOGE("graphic view map failed %d", wView.error());
return C2_CORRUPTED;
}
- ive_video_encode_ip_t s_encode_ip;
- ive_video_encode_op_t s_encode_op;
- if (C2_OK != setEncodeArgs(&s_encode_ip, &s_encode_op, nullptr,
+ ih264e_video_encode_ip_t s_video_encode_ip = {};
+ ih264e_video_encode_op_t s_video_encode_op = {};
+ ive_video_encode_ip_t *ps_encode_ip = &s_video_encode_ip.s_ive_ip;
+ ive_video_encode_op_t *ps_encode_op = &s_video_encode_op.s_ive_op;
+ if (C2_OK != setEncodeArgs(ps_encode_ip, ps_encode_op, nullptr,
wView.base(), wView.capacity(), 0)) {
ALOGE("setEncodeArgs failed for drainInternal");
mSignalledError = true;
@@ -1754,9 +1763,9 @@
work->workletsProcessed = 1u;
return C2_CORRUPTED;
}
- (void)ive_api_function(mCodecCtx, &s_encode_ip, &s_encode_op);
+ (void)ive_api_function(mCodecCtx, &s_video_encode_ip, &s_video_encode_op);
- void *freed = s_encode_op.s_inp_buf.apv_bufs[0];
+ void *freed = ps_encode_op->s_inp_buf.apv_bufs[0];
/* If encoder frees up an input buffer, mark it as free */
if (freed != nullptr) {
if (mBuffers.count(freed) == 0u) {
@@ -1768,10 +1777,10 @@
}
}
- if (s_encode_op.output_present) {
- uint64_t workId = ((uint64_t)s_encode_op.u4_timestamp_high << 32) |
- s_encode_op.u4_timestamp_low;
- finishWork(workId, work, &s_encode_op);
+ if (ps_encode_op->output_present) {
+ uint64_t workId = ((uint64_t)ps_encode_op->u4_timestamp_high << 32) |
+ ps_encode_op->u4_timestamp_low;
+ finishWork(workId, work, ps_encode_op);
} else {
if (work->workletsProcessed != 1u) {
work->worklets.front()->output.flags = work->input.flags;
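The encoder hunks above all follow one idiom when switching from the bare ive_* argument structures to the ih264e_* supersets from ih264e.h: zero-initialize the superset, drive the shared setup through a pointer to its embedded s_ive_ip/s_ive_op member, and pass the superset's address to ive_api_function. A minimal sketch of that idiom, reusing only names that appear in the hunks above (request population and error paths trimmed):

    ih264e_video_encode_ip_t s_video_encode_ip = {};
    ih264e_video_encode_op_t s_video_encode_op = {};
    ive_video_encode_ip_t *ps_encode_ip = &s_video_encode_ip.s_ive_ip;
    ive_video_encode_op_t *ps_encode_op = &s_video_encode_op.s_ive_op;
    // populate the request through the ive_* view (setEncodeArgs above), then
    // hand the full superset struct to the codec entry point
    if (IV_SUCCESS != ive_api_function(mCodecCtx, &s_video_encode_ip, &s_video_encode_op)) {
        ALOGE("encode call failed = 0x%x", ps_encode_op->u4_error_code);
    }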
diff --git a/media/codec2/components/avc/C2SoftAvcEnc.h b/media/codec2/components/avc/C2SoftAvcEnc.h
index ee6d47a..673a282 100644
--- a/media/codec2/components/avc/C2SoftAvcEnc.h
+++ b/media/codec2/components/avc/C2SoftAvcEnc.h
@@ -24,8 +24,7 @@
#include <SimpleC2Component.h>
#include "ih264_typedefs.h"
-#include "iv2.h"
-#include "ive2.h"
+#include "ih264e.h"
namespace android {
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index fb3fbd0..dfad226 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -110,17 +110,20 @@
}
case kWhatStop: {
int32_t err = thiz->onStop();
+ thiz->mOutputBlockPool.reset();
Reply(msg, &err);
break;
}
case kWhatReset: {
thiz->onReset();
+ thiz->mOutputBlockPool.reset();
mRunning = false;
Reply(msg);
break;
}
case kWhatRelease: {
thiz->onRelease();
+ thiz->mOutputBlockPool.reset();
mRunning = false;
Reply(msg);
break;
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.cpp b/media/codec2/components/hevc/C2SoftHevcDec.cpp
index a374dfa..6bcf3a2 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcDec.cpp
@@ -26,7 +26,6 @@
#include <SimpleC2Interface.h>
#include "C2SoftHevcDec.h"
-#include "ihevcd_cxa.h"
namespace android {
@@ -380,12 +379,14 @@
}
while (true) {
- ivd_video_decode_ip_t s_decode_ip;
- ivd_video_decode_op_t s_decode_op;
+ ihevcd_cxa_video_decode_ip_t s_hevcd_decode_ip = {};
+ ihevcd_cxa_video_decode_op_t s_hevcd_decode_op = {};
+ ivd_video_decode_ip_t *ps_decode_ip = &s_hevcd_decode_ip.s_ivd_video_decode_ip_t;
+ ivd_video_decode_op_t *ps_decode_op = &s_hevcd_decode_op.s_ivd_video_decode_op_t;
- setDecodeArgs(&s_decode_ip, &s_decode_op, nullptr, nullptr, 0, 0, 0);
- (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
- if (0 == s_decode_op.u4_output_present) {
+ setDecodeArgs(ps_decode_ip, ps_decode_op, nullptr, nullptr, 0, 0, 0);
+ (void) ivdec_api_function(mDecHandle, ps_decode_ip, ps_decode_op);
+ if (0 == ps_decode_op->u4_output_present) {
resetPlugin();
break;
}
@@ -400,8 +401,8 @@
}
status_t C2SoftHevcDec::createDecoder() {
- ivdext_create_ip_t s_create_ip;
- ivdext_create_op_t s_create_op;
+ ivdext_create_ip_t s_create_ip = {};
+ ivdext_create_op_t s_create_op = {};
s_create_ip.s_ivd_create_ip_t.u4_size = sizeof(ivdext_create_ip_t);
s_create_ip.s_ivd_create_ip_t.e_cmd = IVD_CMD_CREATE;
@@ -427,8 +428,8 @@
}
status_t C2SoftHevcDec::setNumCores() {
- ivdext_ctl_set_num_cores_ip_t s_set_num_cores_ip;
- ivdext_ctl_set_num_cores_op_t s_set_num_cores_op;
+ ivdext_ctl_set_num_cores_ip_t s_set_num_cores_ip = {};
+ ivdext_ctl_set_num_cores_op_t s_set_num_cores_op = {};
s_set_num_cores_ip.u4_size = sizeof(ivdext_ctl_set_num_cores_ip_t);
s_set_num_cores_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -447,22 +448,26 @@
}
status_t C2SoftHevcDec::setParams(size_t stride, IVD_VIDEO_DECODE_MODE_T dec_mode) {
- ivd_ctl_set_config_ip_t s_set_dyn_params_ip;
- ivd_ctl_set_config_op_t s_set_dyn_params_op;
+ ihevcd_cxa_ctl_set_config_ip_t s_hevcd_set_dyn_params_ip = {};
+ ihevcd_cxa_ctl_set_config_op_t s_hevcd_set_dyn_params_op = {};
+ ivd_ctl_set_config_ip_t *ps_set_dyn_params_ip =
+ &s_hevcd_set_dyn_params_ip.s_ivd_ctl_set_config_ip_t;
+ ivd_ctl_set_config_op_t *ps_set_dyn_params_op =
+ &s_hevcd_set_dyn_params_op.s_ivd_ctl_set_config_op_t;
- s_set_dyn_params_ip.u4_size = sizeof(ivd_ctl_set_config_ip_t);
- s_set_dyn_params_ip.e_cmd = IVD_CMD_VIDEO_CTL;
- s_set_dyn_params_ip.e_sub_cmd = IVD_CMD_CTL_SETPARAMS;
- s_set_dyn_params_ip.u4_disp_wd = (UWORD32) stride;
- s_set_dyn_params_ip.e_frm_skip_mode = IVD_SKIP_NONE;
- s_set_dyn_params_ip.e_frm_out_mode = IVD_DISPLAY_FRAME_OUT;
- s_set_dyn_params_ip.e_vid_dec_mode = dec_mode;
- s_set_dyn_params_op.u4_size = sizeof(ivd_ctl_set_config_op_t);
+ ps_set_dyn_params_ip->u4_size = sizeof(ihevcd_cxa_ctl_set_config_ip_t);
+ ps_set_dyn_params_ip->e_cmd = IVD_CMD_VIDEO_CTL;
+ ps_set_dyn_params_ip->e_sub_cmd = IVD_CMD_CTL_SETPARAMS;
+ ps_set_dyn_params_ip->u4_disp_wd = (UWORD32) stride;
+ ps_set_dyn_params_ip->e_frm_skip_mode = IVD_SKIP_NONE;
+ ps_set_dyn_params_ip->e_frm_out_mode = IVD_DISPLAY_FRAME_OUT;
+ ps_set_dyn_params_ip->e_vid_dec_mode = dec_mode;
+ ps_set_dyn_params_op->u4_size = sizeof(ihevcd_cxa_ctl_set_config_op_t);
IV_API_CALL_STATUS_T status = ivdec_api_function(mDecHandle,
- &s_set_dyn_params_ip,
- &s_set_dyn_params_op);
+ ps_set_dyn_params_ip,
+ ps_set_dyn_params_op);
if (status != IV_SUCCESS) {
- ALOGE("error in %s: 0x%x", __func__, s_set_dyn_params_op.u4_error_code);
+ ALOGE("error in %s: 0x%x", __func__, ps_set_dyn_params_op->u4_error_code);
return UNKNOWN_ERROR;
}
@@ -470,8 +475,8 @@
}
status_t C2SoftHevcDec::getVersion() {
- ivd_ctl_getversioninfo_ip_t s_get_versioninfo_ip;
- ivd_ctl_getversioninfo_op_t s_get_versioninfo_op;
+ ivd_ctl_getversioninfo_ip_t s_get_versioninfo_ip = {};
+ ivd_ctl_getversioninfo_op_t s_get_versioninfo_op = {};
UWORD8 au1_buf[512];
s_get_versioninfo_ip.u4_size = sizeof(ivd_ctl_getversioninfo_ip_t);
@@ -529,7 +534,7 @@
if (OK != setParams(mStride, IVD_DECODE_FRAME)) return false;
}
- ps_decode_ip->u4_size = sizeof(ivd_video_decode_ip_t);
+ ps_decode_ip->u4_size = sizeof(ihevcd_cxa_video_decode_ip_t);
ps_decode_ip->e_cmd = IVD_CMD_VIDEO_DECODE;
if (inBuffer) {
ps_decode_ip->u4_ts = tsMarker;
@@ -558,15 +563,15 @@
ps_decode_ip->s_out_buffer.pu1_bufs[2] = mOutBufferFlush + lumaSize + chromaSize;
}
ps_decode_ip->s_out_buffer.u4_num_bufs = 3;
- ps_decode_op->u4_size = sizeof(ivd_video_decode_op_t);
+ ps_decode_op->u4_size = sizeof(ihevcd_cxa_video_decode_op_t);
ps_decode_op->u4_output_present = 0;
return true;
}
bool C2SoftHevcDec::getVuiParams() {
- ivdext_ctl_get_vui_params_ip_t s_get_vui_params_ip;
- ivdext_ctl_get_vui_params_op_t s_get_vui_params_op;
+ ivdext_ctl_get_vui_params_ip_t s_get_vui_params_ip = {};
+ ivdext_ctl_get_vui_params_op_t s_get_vui_params_op = {};
s_get_vui_params_ip.u4_size = sizeof(ivdext_ctl_get_vui_params_ip_t);
s_get_vui_params_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -614,8 +619,8 @@
}
status_t C2SoftHevcDec::setFlushMode() {
- ivd_ctl_flush_ip_t s_set_flush_ip;
- ivd_ctl_flush_op_t s_set_flush_op;
+ ivd_ctl_flush_ip_t s_set_flush_ip = {};
+ ivd_ctl_flush_op_t s_set_flush_op = {};
s_set_flush_ip.u4_size = sizeof(ivd_ctl_flush_ip_t);
s_set_flush_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -633,8 +638,8 @@
}
status_t C2SoftHevcDec::resetDecoder() {
- ivd_ctl_reset_ip_t s_reset_ip;
- ivd_ctl_reset_op_t s_reset_op;
+ ivd_ctl_reset_ip_t s_reset_ip = {};
+ ivd_ctl_reset_op_t s_reset_op = {};
s_reset_ip.u4_size = sizeof(ivd_ctl_reset_ip_t);
s_reset_ip.e_cmd = IVD_CMD_VIDEO_CTL;
@@ -662,8 +667,8 @@
status_t C2SoftHevcDec::deleteDecoder() {
if (mDecHandle) {
- ivdext_delete_ip_t s_delete_ip;
- ivdext_delete_op_t s_delete_op;
+ ivdext_delete_ip_t s_delete_ip = {};
+ ivdext_delete_op_t s_delete_op = {};
s_delete_ip.s_ivd_delete_ip_t.u4_size = sizeof(ivdext_delete_ip_t);
s_delete_ip.s_ivd_delete_ip_t.e_cmd = IVD_CMD_DELETE;
@@ -835,9 +840,11 @@
work->result = wView.error();
return;
}
- ivd_video_decode_ip_t s_decode_ip;
- ivd_video_decode_op_t s_decode_op;
- if (!setDecodeArgs(&s_decode_ip, &s_decode_op, &rView, &wView,
+ ihevcd_cxa_video_decode_ip_t s_hevcd_decode_ip = {};
+ ihevcd_cxa_video_decode_op_t s_hevcd_decode_op = {};
+ ivd_video_decode_ip_t *ps_decode_ip = &s_hevcd_decode_ip.s_ivd_video_decode_ip_t;
+ ivd_video_decode_op_t *ps_decode_op = &s_hevcd_decode_op.s_ivd_video_decode_op_t;
+ if (!setDecodeArgs(ps_decode_ip, ps_decode_op, &rView, &wView,
inOffset + inPos, inSize - inPos, workIndex)) {
mSignalledError = true;
work->workletsProcessed = 1u;
@@ -852,26 +859,26 @@
WORD32 delay;
GETTIME(&mTimeStart, nullptr);
TIME_DIFF(mTimeEnd, mTimeStart, delay);
- (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
+ (void) ivdec_api_function(mDecHandle, ps_decode_ip, ps_decode_op);
WORD32 decodeTime;
GETTIME(&mTimeEnd, nullptr);
TIME_DIFF(mTimeStart, mTimeEnd, decodeTime);
ALOGV("decodeTime=%6d delay=%6d numBytes=%6d", decodeTime, delay,
- s_decode_op.u4_num_bytes_consumed);
- if (IVD_MEM_ALLOC_FAILED == (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+ ps_decode_op->u4_num_bytes_consumed);
+ if (IVD_MEM_ALLOC_FAILED == (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
ALOGE("allocation failure in decoder");
mSignalledError = true;
work->workletsProcessed = 1u;
work->result = C2_CORRUPTED;
return;
} else if (IVD_STREAM_WIDTH_HEIGHT_NOT_SUPPORTED ==
- (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+ (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
ALOGE("unsupported resolution : %dx%d", mWidth, mHeight);
mSignalledError = true;
work->workletsProcessed = 1u;
work->result = C2_CORRUPTED;
return;
- } else if (IVD_RES_CHANGED == (s_decode_op.u4_error_code & IVD_ERROR_MASK)) {
+ } else if (IVD_RES_CHANGED == (ps_decode_op->u4_error_code & IVD_ERROR_MASK)) {
ALOGV("resolution changed");
drainInternal(DRAIN_COMPONENT_NO_EOS, pool, work);
resetDecoder();
@@ -880,16 +887,16 @@
/* Decode header and get new dimensions */
setParams(mStride, IVD_DECODE_HEADER);
- (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
- } else if (IS_IVD_FATAL_ERROR(s_decode_op.u4_error_code)) {
- ALOGE("Fatal error in decoder 0x%x", s_decode_op.u4_error_code);
+ (void) ivdec_api_function(mDecHandle, ps_decode_ip, ps_decode_op);
+ } else if (IS_IVD_FATAL_ERROR(ps_decode_op->u4_error_code)) {
+ ALOGE("Fatal error in decoder 0x%x", ps_decode_op->u4_error_code);
mSignalledError = true;
work->workletsProcessed = 1u;
work->result = C2_CORRUPTED;
return;
}
- if (s_decode_op.i4_reorder_depth >= 0 && mOutputDelay != s_decode_op.i4_reorder_depth) {
- mOutputDelay = s_decode_op.i4_reorder_depth;
+ if (ps_decode_op->i4_reorder_depth >= 0 && mOutputDelay != ps_decode_op->i4_reorder_depth) {
+ mOutputDelay = ps_decode_op->i4_reorder_depth;
ALOGV("New Output delay %d ", mOutputDelay);
C2PortActualDelayTuning::output outputDelay(mOutputDelay);
@@ -907,15 +914,15 @@
return;
}
}
- if (0 < s_decode_op.u4_pic_wd && 0 < s_decode_op.u4_pic_ht) {
+ if (0 < ps_decode_op->u4_pic_wd && 0 < ps_decode_op->u4_pic_ht) {
if (mHeaderDecoded == false) {
mHeaderDecoded = true;
- setParams(ALIGN32(s_decode_op.u4_pic_wd), IVD_DECODE_FRAME);
+ setParams(ALIGN32(ps_decode_op->u4_pic_wd), IVD_DECODE_FRAME);
}
- if (s_decode_op.u4_pic_wd != mWidth || s_decode_op.u4_pic_ht != mHeight) {
- mWidth = s_decode_op.u4_pic_wd;
- mHeight = s_decode_op.u4_pic_ht;
- CHECK_EQ(0u, s_decode_op.u4_output_present);
+ if (ps_decode_op->u4_pic_wd != mWidth || ps_decode_op->u4_pic_ht != mHeight) {
+ mWidth = ps_decode_op->u4_pic_wd;
+ mHeight = ps_decode_op->u4_pic_ht;
+ CHECK_EQ(0u, ps_decode_op->u4_output_present);
C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
@@ -935,15 +942,15 @@
}
}
(void) getVuiParams();
- hasPicture |= (1 == s_decode_op.u4_frame_decoded_flag);
- if (s_decode_op.u4_output_present) {
- finishWork(s_decode_op.u4_ts, work);
+ hasPicture |= (1 == ps_decode_op->u4_frame_decoded_flag);
+ if (ps_decode_op->u4_output_present) {
+ finishWork(ps_decode_op->u4_ts, work);
}
- if (0 == s_decode_op.u4_num_bytes_consumed) {
+ if (0 == ps_decode_op->u4_num_bytes_consumed) {
ALOGD("Bytes consumed is zero. Ignoring remaining bytes");
break;
}
- inPos += s_decode_op.u4_num_bytes_consumed;
+ inPos += ps_decode_op->u4_num_bytes_consumed;
if (hasPicture && (inSize - inPos)) {
ALOGD("decoded frame in current access nal, ignoring further trailing bytes %d",
(int)inSize - (int)inPos);
@@ -985,16 +992,18 @@
ALOGE("graphic view map failed %d", wView.error());
return C2_CORRUPTED;
}
- ivd_video_decode_ip_t s_decode_ip;
- ivd_video_decode_op_t s_decode_op;
- if (!setDecodeArgs(&s_decode_ip, &s_decode_op, nullptr, &wView, 0, 0, 0)) {
+ ihevcd_cxa_video_decode_ip_t s_hevcd_decode_ip = {};
+ ihevcd_cxa_video_decode_op_t s_hevcd_decode_op = {};
+ ivd_video_decode_ip_t *ps_decode_ip = &s_hevcd_decode_ip.s_ivd_video_decode_ip_t;
+ ivd_video_decode_op_t *ps_decode_op = &s_hevcd_decode_op.s_ivd_video_decode_op_t;
+ if (!setDecodeArgs(ps_decode_ip, ps_decode_op, nullptr, &wView, 0, 0, 0)) {
mSignalledError = true;
work->workletsProcessed = 1u;
return C2_CORRUPTED;
}
- (void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
- if (s_decode_op.u4_output_present) {
- finishWork(s_decode_op.u4_ts, work);
+ (void) ivdec_api_function(mDecHandle, ps_decode_ip, ps_decode_op);
+ if (ps_decode_op->u4_output_present) {
+ finishWork(ps_decode_op->u4_ts, work);
} else {
fillEmptyWork(work);
break;
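The HEVC decoder hunks apply the same wrapper idiom with the ihevcd_cxa_* extension structs from ihevcd_cxa.h, and u4_size is now set to the size of the extension struct rather than the base ivd_* struct so the library can see the extra fields. A sketch assembled from the lines above (output-buffer setup omitted):

    ihevcd_cxa_video_decode_ip_t s_hevcd_decode_ip = {};
    ihevcd_cxa_video_decode_op_t s_hevcd_decode_op = {};
    ivd_video_decode_ip_t *ps_decode_ip = &s_hevcd_decode_ip.s_ivd_video_decode_ip_t;
    ivd_video_decode_op_t *ps_decode_op = &s_hevcd_decode_op.s_ivd_video_decode_op_t;
    ps_decode_ip->u4_size = sizeof(ihevcd_cxa_video_decode_ip_t);  // extension size, not ivd_* size
    ps_decode_op->u4_size = sizeof(ihevcd_cxa_video_decode_op_t);
    ps_decode_ip->e_cmd = IVD_CMD_VIDEO_DECODE;
    (void) ivdec_api_function(mDecHandle, ps_decode_ip, ps_decode_op);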
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.h b/media/codec2/components/hevc/C2SoftHevcDec.h
index 600d7c1..b9b0a48 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.h
+++ b/media/codec2/components/hevc/C2SoftHevcDec.h
@@ -23,8 +23,7 @@
#include <SimpleC2Component.h>
#include "ihevc_typedefs.h"
-#include "iv.h"
-#include "ivd.h"
+#include "ihevcd_cxa.h"
namespace android {
diff --git a/media/codec2/hidl/1.0/vts/.clang-format b/media/codec2/hidl/1.0/vts/.clang-format
new file mode 120000
index 0000000..136279c
--- /dev/null
+++ b/media/codec2/hidl/1.0/vts/.clang-format
@@ -0,0 +1 @@
+../../../../../../../build/soong/scripts/system-clang-format
\ No newline at end of file
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
index 1f95eaf..efc5813 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
@@ -33,11 +33,11 @@
using android::C2AllocatorIon;
#include "media_c2_hidl_test_common.h"
+using DecodeTestParameters = std::tuple<std::string, std::string, uint32_t, bool>;
+static std::vector<DecodeTestParameters> kDecodeTestParameters;
-static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
- kDecodeTestParameters;
-
-static std::vector<std::tuple<std::string, std::string, std::string>> kCsdFlushTestParameters;
+using CsdFlushTestParameters = std::tuple<std::string, std::string, bool>;
+static std::vector<CsdFlushTestParameters> kCsdFlushTestParameters;
struct CompToURL {
std::string mime;
@@ -46,36 +46,26 @@
};
std::vector<CompToURL> kCompToURL = {
- {"mp4a-latm",
- "bbb_aac_stereo_128kbps_48000hz.aac", "bbb_aac_stereo_128kbps_48000hz.info"},
- {"mp4a-latm",
- "bbb_aac_stereo_128kbps_48000hz.aac", "bbb_aac_stereo_128kbps_48000hz_multi_frame.info"},
- {"audio/mpeg",
- "bbb_mp3_stereo_192kbps_48000hz.mp3", "bbb_mp3_stereo_192kbps_48000hz.info"},
- {"audio/mpeg",
- "bbb_mp3_stereo_192kbps_48000hz.mp3", "bbb_mp3_stereo_192kbps_48000hz_multi_frame.info"},
- {"3gpp",
- "sine_amrnb_1ch_12kbps_8000hz.amrnb", "sine_amrnb_1ch_12kbps_8000hz.info"},
- {"3gpp",
- "sine_amrnb_1ch_12kbps_8000hz.amrnb", "sine_amrnb_1ch_12kbps_8000hz_multi_frame.info"},
- {"amr-wb",
- "bbb_amrwb_1ch_14kbps_16000hz.amrwb", "bbb_amrwb_1ch_14kbps_16000hz.info"},
- {"amr-wb",
- "bbb_amrwb_1ch_14kbps_16000hz.amrwb", "bbb_amrwb_1ch_14kbps_16000hz_multi_frame.info"},
- {"vorbis",
- "bbb_vorbis_stereo_128kbps_48000hz.vorbis", "bbb_vorbis_stereo_128kbps_48000hz.info"},
- {"opus",
- "bbb_opus_stereo_128kbps_48000hz.opus", "bbb_opus_stereo_128kbps_48000hz.info"},
- {"g711-alaw",
- "bbb_g711alaw_1ch_8khz.raw", "bbb_g711alaw_1ch_8khz.info"},
- {"g711-mlaw",
- "bbb_g711mulaw_1ch_8khz.raw", "bbb_g711mulaw_1ch_8khz.info"},
- {"gsm",
- "bbb_gsm_1ch_8khz_13kbps.raw", "bbb_gsm_1ch_8khz_13kbps.info"},
- {"raw",
- "bbb_raw_1ch_8khz_s32le.raw", "bbb_raw_1ch_8khz_s32le.info"},
- {"flac",
- "bbb_flac_stereo_680kbps_48000hz.flac", "bbb_flac_stereo_680kbps_48000hz.info"},
+ {"mp4a-latm", "bbb_aac_stereo_128kbps_48000hz.aac", "bbb_aac_stereo_128kbps_48000hz.info"},
+ {"mp4a-latm", "bbb_aac_stereo_128kbps_48000hz.aac",
+ "bbb_aac_stereo_128kbps_48000hz_multi_frame.info"},
+ {"audio/mpeg", "bbb_mp3_stereo_192kbps_48000hz.mp3", "bbb_mp3_stereo_192kbps_48000hz.info"},
+ {"audio/mpeg", "bbb_mp3_stereo_192kbps_48000hz.mp3",
+ "bbb_mp3_stereo_192kbps_48000hz_multi_frame.info"},
+ {"3gpp", "sine_amrnb_1ch_12kbps_8000hz.amrnb", "sine_amrnb_1ch_12kbps_8000hz.info"},
+ {"3gpp", "sine_amrnb_1ch_12kbps_8000hz.amrnb",
+ "sine_amrnb_1ch_12kbps_8000hz_multi_frame.info"},
+ {"amr-wb", "bbb_amrwb_1ch_14kbps_16000hz.amrwb", "bbb_amrwb_1ch_14kbps_16000hz.info"},
+ {"amr-wb", "bbb_amrwb_1ch_14kbps_16000hz.amrwb",
+ "bbb_amrwb_1ch_14kbps_16000hz_multi_frame.info"},
+ {"vorbis", "bbb_vorbis_stereo_128kbps_48000hz.vorbis",
+ "bbb_vorbis_stereo_128kbps_48000hz.info"},
+ {"opus", "bbb_opus_stereo_128kbps_48000hz.opus", "bbb_opus_stereo_128kbps_48000hz.info"},
+ {"g711-alaw", "bbb_g711alaw_1ch_8khz.raw", "bbb_g711alaw_1ch_8khz.info"},
+ {"g711-mlaw", "bbb_g711mulaw_1ch_8khz.raw", "bbb_g711mulaw_1ch_8khz.info"},
+ {"gsm", "bbb_gsm_1ch_8khz_13kbps.raw", "bbb_gsm_1ch_8khz_13kbps.info"},
+ {"raw", "bbb_raw_1ch_8khz_s32le.raw", "bbb_raw_1ch_8khz_s32le.info"},
+ {"flac", "bbb_flac_stereo_680kbps_48000hz.flac", "bbb_flac_stereo_680kbps_48000hz.info"},
};
class LinearBuffer : public C2Buffer {
@@ -212,9 +202,8 @@
}
};
-class Codec2AudioDecHidlTest
- : public Codec2AudioDecHidlTestBase,
- public ::testing::WithParamInterface<std::tuple<std::string, std::string>> {
+class Codec2AudioDecHidlTest : public Codec2AudioDecHidlTestBase,
+ public ::testing::WithParamInterface<TestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -438,10 +427,8 @@
ASSERT_EQ(mComponent->stop(), C2_OK);
}
-class Codec2AudioDecDecodeTest
- : public Codec2AudioDecHidlTestBase,
- public ::testing::WithParamInterface<
- std::tuple<std::string, std::string, std::string, std::string>> {
+class Codec2AudioDecDecodeTest : public Codec2AudioDecHidlTestBase,
+ public ::testing::WithParamInterface<DecodeTestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -452,9 +439,8 @@
description("Decodes input file");
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
- uint32_t streamIndex = std::stoi(std::get<2>(GetParam()));
- ;
- bool signalEOS = !std::get<3>(GetParam()).compare("true");
+ uint32_t streamIndex = std::get<2>(GetParam());
+ bool signalEOS = std::get<3>(GetParam());
mTimestampDevTest = true;
char mURL[512], info[512];
android::Vector<FrameInfo> Info;
@@ -771,9 +757,8 @@
ASSERT_EQ(mComponent->stop(), C2_OK);
}
-class Codec2AudioDecCsdInputTests
- : public Codec2AudioDecHidlTestBase,
- public ::testing::WithParamInterface<std::tuple<std::string, std::string, std::string>> {
+class Codec2AudioDecCsdInputTests : public Codec2AudioDecHidlTestBase,
+ public ::testing::WithParamInterface<CsdFlushTestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -819,7 +804,7 @@
ASSERT_EQ(eleStream.is_open(), true);
bool signalEOS = false;
- bool flushCsd = !std::get<2>(GetParam()).compare("true");
+ bool flushCsd = std::get<2>(GetParam());
ALOGV("sending %d csd data ", numCsds);
int framesToDecode = numCsds;
ASSERT_NO_FATAL_FAILURE(decodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
@@ -875,16 +860,16 @@
}
INSTANTIATE_TEST_SUITE_P(PerInstance, Codec2AudioDecHidlTest, testing::ValuesIn(kTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
// DecodeTest with StreamIndex and EOS / No EOS
INSTANTIATE_TEST_SUITE_P(StreamIndexAndEOS, Codec2AudioDecDecodeTest,
testing::ValuesIn(kDecodeTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
INSTANTIATE_TEST_SUITE_P(CsdInputs, Codec2AudioDecCsdInputTests,
testing::ValuesIn(kCsdFlushTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
} // anonymous namespace
@@ -893,18 +878,18 @@
kTestParameters = getTestParameters(C2Component::DOMAIN_AUDIO, C2Component::KIND_DECODER);
for (auto params : kTestParameters) {
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "false"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 0, false));
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "true"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 0, true));
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "1", "false"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 1, false));
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "1", "true"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 1, true));
kCsdFlushTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "true"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), true));
kCsdFlushTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "false"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), false));
}
::testing::InitGoogleTest(&argc, argv);
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
index 1445e59..562c77f 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
@@ -35,8 +35,9 @@
#include "media_c2_hidl_test_common.h"
-static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
- kEncodeTestParameters;
+using EncodeTestParameters = std::tuple<std::string, std::string, bool, int32_t>;
+
+static std::vector<EncodeTestParameters> kEncodeTestParameters;
class LinearBuffer : public C2Buffer {
public:
@@ -72,30 +73,17 @@
mLinearPool = std::make_shared<C2PooledBlockPool>(mLinearAllocator, mBlockPoolId++);
ASSERT_NE(mLinearPool, nullptr);
- mCompName = unknown_comp;
- struct StringToName {
- const char* Name;
- standardComp CompName;
- };
- const StringToName kStringToName[] = {
- {"aac", aac}, {"flac", flac}, {"opus", opus}, {"amrnb", amrnb}, {"amrwb", amrwb},
- };
- const size_t kNumStringToName = sizeof(kStringToName) / sizeof(kStringToName[0]);
+ std::vector<std::unique_ptr<C2Param>> queried;
+ mComponent->query({}, {C2PortMediaTypeSetting::output::PARAM_TYPE}, C2_DONT_BLOCK,
+ &queried);
+ ASSERT_GT(queried.size(), 0);
- // Find the component type
- for (size_t i = 0; i < kNumStringToName; ++i) {
- if (strcasestr(mComponentName.c_str(), kStringToName[i].Name)) {
- mCompName = kStringToName[i].CompName;
- break;
- }
- }
+ mMime = ((C2PortMediaTypeSetting::output*)queried[0].get())->m.value;
mEos = false;
mCsd = false;
mFramesReceived = 0;
mWorkResult = C2_OK;
mOutputSize = 0u;
- if (mCompName == unknown_comp) mDisableTest = true;
- if (mDisableTest) std::cout << "[ WARN ] Test Disabled \n";
getInputMaxBufSize();
}
@@ -110,6 +98,8 @@
// Get the test parameters from GetParam call.
virtual void getParams() {}
+ void GetURLForComponent(char* mURL);
+
// callback function to process onWorkDone received by Listener
void handleWorkDone(std::list<std::unique_ptr<C2Work>>& workItems) {
for (std::unique_ptr<C2Work>& work : workItems) {
@@ -130,21 +120,13 @@
}
}
}
- enum standardComp {
- aac,
- flac,
- opus,
- amrnb,
- amrwb,
- unknown_comp,
- };
+ std::string mMime;
std::string mInstanceName;
std::string mComponentName;
bool mEos;
bool mCsd;
bool mDisableTest;
- standardComp mCompName;
int32_t mWorkResult;
uint32_t mFramesReceived;
@@ -189,9 +171,8 @@
}
};
-class Codec2AudioEncHidlTest
- : public Codec2AudioEncHidlTestBase,
- public ::testing::WithParamInterface<std::tuple<std::string, std::string>> {
+class Codec2AudioEncHidlTest : public Codec2AudioEncHidlTestBase,
+ public ::testing::WithParamInterface<TestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -199,7 +180,7 @@
};
void validateComponent(const std::shared_ptr<android::Codec2Client::Component>& component,
- Codec2AudioEncHidlTest::standardComp compName, bool& disableTest) {
+ bool& disableTest) {
// Validate its a C2 Component
if (component->getName().find("c2") == std::string::npos) {
ALOGE("Not a c2 component");
@@ -226,13 +207,6 @@
return;
}
}
-
- // Validates component name
- if (compName == Codec2AudioEncHidlTest::unknown_comp) {
- ALOGE("Component InValid");
- disableTest = true;
- return;
- }
ALOGV("Component Valid");
}
@@ -250,56 +224,48 @@
}
// Get config params for a component
-bool getConfigParams(Codec2AudioEncHidlTest::standardComp compName, int32_t* nChannels,
- int32_t* nSampleRate, int32_t* samplesPerFrame) {
- switch (compName) {
- case Codec2AudioEncHidlTest::aac:
- *nChannels = 2;
- *nSampleRate = 48000;
- *samplesPerFrame = 1024;
- break;
- case Codec2AudioEncHidlTest::flac:
- *nChannels = 2;
- *nSampleRate = 48000;
- *samplesPerFrame = 1152;
- break;
- case Codec2AudioEncHidlTest::opus:
- *nChannels = 2;
- *nSampleRate = 48000;
- *samplesPerFrame = 960;
- break;
- case Codec2AudioEncHidlTest::amrnb:
- *nChannels = 1;
- *nSampleRate = 8000;
- *samplesPerFrame = 160;
- break;
- case Codec2AudioEncHidlTest::amrwb:
- *nChannels = 1;
- *nSampleRate = 16000;
- *samplesPerFrame = 160;
- break;
- default:
- return false;
- }
+bool getConfigParams(std::string mime, int32_t* nChannels, int32_t* nSampleRate,
+ int32_t* samplesPerFrame) {
+ if (mime.find("mp4a-latm") != std::string::npos) {
+ *nChannels = 2;
+ *nSampleRate = 48000;
+ *samplesPerFrame = 1024;
+ } else if (mime.find("flac") != std::string::npos) {
+ *nChannels = 2;
+ *nSampleRate = 48000;
+ *samplesPerFrame = 1152;
+ } else if (mime.find("opus") != std::string::npos) {
+ *nChannels = 2;
+ *nSampleRate = 48000;
+ *samplesPerFrame = 960;
+ } else if (mime.find("3gpp") != std::string::npos) {
+ *nChannels = 1;
+ *nSampleRate = 8000;
+ *samplesPerFrame = 160;
+ } else if (mime.find("amr-wb") != std::string::npos) {
+ *nChannels = 1;
+ *nSampleRate = 16000;
+ *samplesPerFrame = 160;
+ } else
+ return false;
+
return true;
}
// LookUpTable of clips and metadata for component testing
-void GetURLForComponent(Codec2AudioEncHidlTest::standardComp comp, char* mURL) {
+void Codec2AudioEncHidlTestBase::GetURLForComponent(char* mURL) {
struct CompToURL {
- Codec2AudioEncHidlTest::standardComp comp;
+ std::string mime;
const char* mURL;
};
static const CompToURL kCompToURL[] = {
- {Codec2AudioEncHidlTest::standardComp::aac, "bbb_raw_2ch_48khz_s16le.raw"},
- {Codec2AudioEncHidlTest::standardComp::amrnb, "bbb_raw_1ch_8khz_s16le.raw"},
- {Codec2AudioEncHidlTest::standardComp::amrwb, "bbb_raw_1ch_16khz_s16le.raw"},
- {Codec2AudioEncHidlTest::standardComp::flac, "bbb_raw_2ch_48khz_s16le.raw"},
- {Codec2AudioEncHidlTest::standardComp::opus, "bbb_raw_2ch_48khz_s16le.raw"},
+ {"mp4a-latm", "bbb_raw_2ch_48khz_s16le.raw"}, {"3gpp", "bbb_raw_1ch_8khz_s16le.raw"},
+ {"amr-wb", "bbb_raw_1ch_16khz_s16le.raw"}, {"flac", "bbb_raw_2ch_48khz_s16le.raw"},
+ {"opus", "bbb_raw_2ch_48khz_s16le.raw"},
};
for (size_t i = 0; i < sizeof(kCompToURL) / sizeof(kCompToURL[0]); ++i) {
- if (kCompToURL[i].comp == comp) {
+ if (mMime.find(kCompToURL[i].mime) != std::string::npos) {
strcat(mURL, kCompToURL[i].mURL);
return;
}
@@ -392,14 +358,12 @@
TEST_P(Codec2AudioEncHidlTest, validateCompName) {
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
ALOGV("Checks if the given component is a valid audio component");
- validateComponent(mComponent, mCompName, mDisableTest);
+ validateComponent(mComponent, mDisableTest);
ASSERT_EQ(mDisableTest, false);
}
-class Codec2AudioEncEncodeTest
- : public Codec2AudioEncHidlTestBase,
- public ::testing::WithParamInterface<
- std::tuple<std::string, std::string, std::string, std::string>> {
+class Codec2AudioEncEncodeTest : public Codec2AudioEncHidlTestBase,
+ public ::testing::WithParamInterface<EncodeTestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -411,17 +375,17 @@
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
char mURL[512];
strcpy(mURL, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL);
- bool signalEOS = !std::get<2>(GetParam()).compare("true");
+ GetURLForComponent(mURL);
+ bool signalEOS = std::get<2>(GetParam());
// Ratio w.r.t. mInputMaxBufSize
- int32_t inputMaxBufRatio = std::stoi(std::get<3>(GetParam()));
+ int32_t inputMaxBufRatio = std::get<3>(GetParam());
int32_t nChannels;
int32_t nSampleRate;
int32_t samplesPerFrame;
- if (!getConfigParams(mCompName, &nChannels, &nSampleRate, &samplesPerFrame)) {
- std::cout << "Failed to get the config params for " << mCompName << " component\n";
+ if (!getConfigParams(mMime, &nChannels, &nSampleRate, &samplesPerFrame)) {
+ std::cout << "Failed to get the config params for " << mComponentName << "\n";
std::cout << "[ WARN ] Test Skipped \n";
return;
}
@@ -461,11 +425,9 @@
ALOGE("framesReceived : %d inputFrames : %u", mFramesReceived, numFrames);
ASSERT_TRUE(false);
}
- if ((mCompName == flac || mCompName == opus || mCompName == aac)) {
- if (!mCsd) {
- ALOGE("CSD buffer missing");
- ASSERT_TRUE(false);
- }
+ if ((mMime.find("flac") != std::string::npos) || (mMime.find("opus") != std::string::npos) ||
+ (mMime.find("mp4a-latm") != std::string::npos)) {
+ ASSERT_TRUE(mCsd) << "CSD buffer missing";
}
ASSERT_EQ(mEos, true);
ASSERT_EQ(mComponent->stop(), C2_OK);
@@ -519,15 +481,15 @@
char mURL[512];
strcpy(mURL, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL);
+ GetURLForComponent(mURL);
mFlushedIndices.clear();
int32_t nChannels;
int32_t nSampleRate;
int32_t samplesPerFrame;
- if (!getConfigParams(mCompName, &nChannels, &nSampleRate, &samplesPerFrame)) {
- std::cout << "Failed to get the config params for " << mCompName << " component\n";
+ if (!getConfigParams(mMime, &nChannels, &nSampleRate, &samplesPerFrame)) {
+ std::cout << "Failed to get the config params for " << mComponentName << "\n";
std::cout << "[ WARN ] Test Skipped \n";
return;
}
@@ -584,7 +546,7 @@
char mURL[512];
strcpy(mURL, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL);
+ GetURLForComponent(mURL);
std::ifstream eleStream;
eleStream.open(mURL, std::ifstream::binary);
@@ -597,8 +559,8 @@
int32_t numFrames = 16;
int32_t maxChannelCount = 8;
- if (!getConfigParams(mCompName, &nChannels, &nSampleRate, &samplesPerFrame)) {
- std::cout << "Failed to get the config params for " << mCompName << " component\n";
+ if (!getConfigParams(mMime, &nChannels, &nSampleRate, &samplesPerFrame)) {
+ std::cout << "Failed to get the config params for " << mComponentName << "\n";
std::cout << "[ WARN ] Test Skipped \n";
return;
}
@@ -608,7 +570,7 @@
// Looping through the maximum number of channel count supported by encoder
for (nChannels = 1; nChannels < maxChannelCount; nChannels++) {
- ALOGV("Configuring %u encoder for channel count = %d", mCompName, nChannels);
+ ALOGV("Configuring encoder %s for channel count = %d", mComponentName.c_str(), nChannels);
if (!setupConfigParam(mComponent, nChannels, nSampleRate)) {
std::cout << "[ WARN ] Test Skipped \n";
return;
@@ -665,7 +627,9 @@
ALOGE("framesReceived : %d inputFrames : %u", mFramesReceived, numFrames);
ASSERT_TRUE(false);
}
- if ((mCompName == flac || mCompName == opus || mCompName == aac)) {
+ if ((mMime.find("flac") != std::string::npos) ||
+ (mMime.find("opus") != std::string::npos) ||
+ (mMime.find("mp4a-latm") != std::string::npos)) {
ASSERT_TRUE(mCsd) << "CSD buffer missing";
}
ASSERT_TRUE(mEos);
@@ -684,7 +648,7 @@
char mURL[512];
strcpy(mURL, sResourceDir.c_str());
- GetURLForComponent(mCompName, mURL);
+ GetURLForComponent(mURL);
std::ifstream eleStream;
eleStream.open(mURL, std::ifstream::binary);
@@ -696,8 +660,8 @@
int32_t nChannels;
int32_t numFrames = 16;
- if (!getConfigParams(mCompName, &nChannels, &nSampleRate, &samplesPerFrame)) {
- std::cout << "Failed to get the config params for " << mCompName << " component\n";
+ if (!getConfigParams(mMime, &nChannels, &nSampleRate, &samplesPerFrame)) {
+ std::cout << "Failed to get the config params for " << mComponentName << "\n";
std::cout << "[ WARN ] Test Skipped \n";
return;
}
@@ -708,7 +672,7 @@
uint32_t prevSampleRate = 0u;
for (int32_t nSampleRate : sampleRateValues) {
- ALOGV("Configuring %u encoder for SampleRate = %d", mCompName, nSampleRate);
+ ALOGV("Configuring encoder %s for SampleRate = %d", mComponentName.c_str(), nSampleRate);
if (!setupConfigParam(mComponent, nChannels, nSampleRate)) {
std::cout << "[ WARN ] Test Skipped \n";
return;
@@ -769,7 +733,9 @@
ALOGE("framesReceived : %d inputFrames : %u", mFramesReceived, numFrames);
ASSERT_TRUE(false);
}
- if ((mCompName == flac || mCompName == opus || mCompName == aac)) {
+ if ((mMime.find("flac") != std::string::npos) ||
+ (mMime.find("opus") != std::string::npos) ||
+ (mMime.find("mp4a-latm") != std::string::npos)) {
ASSERT_TRUE(mCsd) << "CSD buffer missing";
}
ASSERT_TRUE(mEos);
@@ -783,13 +749,13 @@
}
INSTANTIATE_TEST_SUITE_P(PerInstance, Codec2AudioEncHidlTest, testing::ValuesIn(kTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
// EncodeTest with EOS / No EOS and inputMaxBufRatio
// inputMaxBufRatio is ratio w.r.t. mInputMaxBufSize
INSTANTIATE_TEST_SUITE_P(EncodeTest, Codec2AudioEncEncodeTest,
testing::ValuesIn(kEncodeTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
} // anonymous namespace
@@ -798,13 +764,13 @@
kTestParameters = getTestParameters(C2Component::DOMAIN_AUDIO, C2Component::KIND_ENCODER);
for (auto params : kTestParameters) {
kEncodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "false", "1"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), false, 1));
kEncodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "false", "2"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), false, 2));
kEncodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "true", "1"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), true, 1));
kEncodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "true", "2"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), true, 2));
}
::testing::InitGoogleTest(&argc, argv);
diff --git a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
index de34705..1f1681d 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
@@ -27,13 +27,13 @@
std::string sComponentNamePrefix = "";
static constexpr struct option kArgOptions[] = {
- {"res", required_argument, 0, 'P'},
- {"prefix", required_argument, 0, 'p'},
- {"help", required_argument, 0, 'h'},
- {nullptr, 0, nullptr, 0},
+ {"res", required_argument, 0, 'P'},
+ {"prefix", required_argument, 0, 'p'},
+ {"help", required_argument, 0, 'h'},
+ {nullptr, 0, nullptr, 0},
};
-void printUsage(char *me) {
+void printUsage(char* me) {
std::cerr << "VTS tests to test codec2 components \n";
std::cerr << "Usage: " << me << " [options] \n";
std::cerr << "\t -P, --res: Mandatory path to a folder that contains test resources \n";
@@ -49,17 +49,17 @@
int option_index;
while ((arg = getopt_long(argc, argv, ":P:p:h", kArgOptions, &option_index)) != -1) {
switch (arg) {
- case 'P':
- sResourceDir = optarg;
- break;
- case 'p':
- sComponentNamePrefix = optarg;
- break;
- case 'h':
- printUsage(argv[0]);
- break;
- default:
- break;
+ case 'P':
+ sResourceDir = optarg;
+ break;
+ case 'p':
+ sComponentNamePrefix = optarg;
+ break;
+ case 'h':
+ printUsage(argv[0]);
+ break;
+ default:
+ break;
}
}
}
@@ -134,8 +134,7 @@
for (size_t i = 0; i < updates.size(); ++i) {
C2Param* param = updates[i].get();
if (param->index() == C2StreamInitDataInfo::output::PARAM_TYPE) {
- C2StreamInitDataInfo::output* csdBuffer =
- (C2StreamInitDataInfo::output*)(param);
+ C2StreamInitDataInfo::output* csdBuffer = (C2StreamInitDataInfo::output*)(param);
size_t csdSize = csdBuffer->flexCount();
if (csdSize > 0) csd = true;
} else if ((param->index() == C2StreamSampleRateInfo::output::PARAM_TYPE) ||
@@ -160,8 +159,7 @@
typedef std::unique_lock<std::mutex> ULock;
ULock l(queueLock);
workQueue.push_back(std::move(work));
- if (!flushedIndices.empty() &&
- (frameIndexIt != flushedIndices.end())) {
+ if (!flushedIndices.empty() && (frameIndexIt != flushedIndices.end())) {
flushedIndices.erase(frameIndexIt);
}
queueCondition.notify_all();
@@ -178,15 +176,15 @@
}
// Return all test parameters, a list of tuple of <instance, component>
-const std::vector<std::tuple<std::string, std::string>>& getTestParameters() {
+const std::vector<TestParameters>& getTestParameters() {
return getTestParameters(C2Component::DOMAIN_OTHER, C2Component::KIND_OTHER);
}
// Return all test parameters, a list of tuple of <instance, component> with matching domain and
// kind.
-const std::vector<std::tuple<std::string, std::string>>& getTestParameters(
- C2Component::domain_t domain, C2Component::kind_t kind) {
- static std::vector<std::tuple<std::string, std::string>> parameters;
+const std::vector<TestParameters>& getTestParameters(C2Component::domain_t domain,
+ C2Component::kind_t kind) {
+ static std::vector<TestParameters> parameters;
auto instances = android::Codec2Client::GetServiceNames();
for (std::string instance : instances) {
diff --git a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
index a2f1561..e74f247 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
@@ -40,7 +40,8 @@
using namespace ::std::chrono;
-static std::vector<std::tuple<std::string, std::string>> kTestParameters;
+using TestParameters = std::tuple<std::string, std::string>;
+static std::vector<TestParameters> kTestParameters;
// Resource directory
extern std::string sResourceDir;
@@ -54,6 +55,18 @@
int64_t timestamp;
};
+template <typename... T>
+static inline std::string PrintInstanceTupleNameToString(
+ const testing::TestParamInfo<std::tuple<T...>>& info) {
+ std::stringstream ss;
+ std::apply([&ss](auto&&... elems) { ((ss << elems << '_'), ...); }, info.param);
+ ss << info.index;
+ std::string param_string = ss.str();
+ auto isNotAlphaNum = [](char c) { return !std::isalnum(c); };
+ std::replace_if(param_string.begin(), param_string.end(), isNotAlphaNum, '_');
+ return param_string;
+}
+
/*
* Handle Callback functions onWorkDone(), onTripped(),
* onError(), onDeath(), onFramesRendered()
@@ -114,12 +127,12 @@
void parseArgs(int argc, char** argv);
// Return all test parameters, a list of tuple of <instance, component>.
-const std::vector<std::tuple<std::string, std::string>>& getTestParameters();
+const std::vector<TestParameters>& getTestParameters();
// Return all test parameters, a list of tuple of <instance, component> with matching domain and
// kind.
-const std::vector<std::tuple<std::string, std::string>>& getTestParameters(
- C2Component::domain_t domain, C2Component::kind_t kind);
+const std::vector<TestParameters>& getTestParameters(C2Component::domain_t domain,
+ C2Component::kind_t kind);
/*
* common functions declarations
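The PrintInstanceTupleNameToString helper added above streams every tuple element (bools print as 0/1 since boolalpha is never set), appends the parameter index, and replaces any non-alphanumeric character with '_'. That is what lets the typed parameter tuples drop android::hardware::PrintInstanceTupleNameToString in the INSTANTIATE_TEST_SUITE_P calls. As an illustration with assumed instance and component names, the DecodeTestParameters tuple ("default", "c2.android.aac.decoder", 0, false) at parameter index 3 would be named:

    // default_c2_android_aac_decoder_0_0_3
    INSTANTIATE_TEST_SUITE_P(StreamIndexAndEOS, Codec2AudioDecDecodeTest,
                             testing::ValuesIn(kDecodeTestParameters),
                             PrintInstanceTupleNameToString<>);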
diff --git a/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
index 0648dd9..29acd33 100644
--- a/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
@@ -53,9 +53,8 @@
}
namespace {
-
-static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
- kInputTestParameters;
+using InputTestParameters = std::tuple<std::string, std::string, uint32_t, bool>;
+static std::vector<InputTestParameters> kInputTestParameters;
// google.codec2 Component test setup
class Codec2ComponentHidlTestBase : public ::testing::Test {
@@ -120,9 +119,8 @@
}
};
-class Codec2ComponentHidlTest
- : public Codec2ComponentHidlTestBase,
- public ::testing::WithParamInterface<std::tuple<std::string, std::string>> {
+class Codec2ComponentHidlTest : public Codec2ComponentHidlTestBase,
+ public ::testing::WithParamInterface<TestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -317,10 +315,8 @@
ASSERT_EQ(err, C2_OK);
}
-class Codec2ComponentInputTests
- : public Codec2ComponentHidlTestBase,
- public ::testing::WithParamInterface<
- std::tuple<std::string, std::string, std::string, std::string>> {
+class Codec2ComponentInputTests : public Codec2ComponentHidlTestBase,
+ public ::testing::WithParamInterface<InputTestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -330,8 +326,8 @@
TEST_P(Codec2ComponentInputTests, InputBufferTest) {
description("Tests for different inputs");
- uint32_t flags = std::stoul(std::get<2>(GetParam()));
- bool isNullBuffer = !std::get<3>(GetParam()).compare("true");
+ uint32_t flags = std::get<2>(GetParam());
+ bool isNullBuffer = std::get<3>(GetParam());
if (isNullBuffer)
ALOGD("Testing for null input buffer with flag : %u", flags);
else
@@ -350,11 +346,10 @@
}
INSTANTIATE_TEST_SUITE_P(PerInstance, Codec2ComponentHidlTest, testing::ValuesIn(kTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
INSTANTIATE_TEST_CASE_P(NonStdInputs, Codec2ComponentInputTests,
- testing::ValuesIn(kInputTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ testing::ValuesIn(kInputTestParameters), PrintInstanceTupleNameToString<>);
} // anonymous namespace
// TODO: Add test for Invalid work,
@@ -364,18 +359,15 @@
kTestParameters = getTestParameters();
for (auto params : kTestParameters) {
kInputTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "true"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 0, true));
+ kInputTestParameters.push_back(std::make_tuple(std::get<0>(params), std::get<1>(params),
+ C2FrameData::FLAG_END_OF_STREAM, true));
kInputTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params),
- std::to_string(C2FrameData::FLAG_END_OF_STREAM), "true"));
- kInputTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "false"));
- kInputTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params),
- std::to_string(C2FrameData::FLAG_CODEC_CONFIG), "false"));
- kInputTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params),
- std::to_string(C2FrameData::FLAG_END_OF_STREAM), "false"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 0, false));
+ kInputTestParameters.push_back(std::make_tuple(std::get<0>(params), std::get<1>(params),
+ C2FrameData::FLAG_CODEC_CONFIG, false));
+ kInputTestParameters.push_back(std::make_tuple(std::get<0>(params), std::get<1>(params),
+ C2FrameData::FLAG_END_OF_STREAM, false));
}
::testing::InitGoogleTest(&argc, argv);
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
index f29da0e..d0a1c31 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
@@ -40,10 +40,11 @@
#include "media_c2_hidl_test_common.h"
#include "media_c2_video_hidl_test_common.h"
-static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
- kDecodeTestParameters;
+using DecodeTestParameters = std::tuple<std::string, std::string, uint32_t, bool>;
+static std::vector<DecodeTestParameters> kDecodeTestParameters;
-static std::vector<std::tuple<std::string, std::string, std::string>> kCsdFlushTestParameters;
+using CsdFlushTestParameters = std::tuple<std::string, std::string, bool>;
+static std::vector<CsdFlushTestParameters> kCsdFlushTestParameters;
struct CompToURL {
std::string mime;
@@ -52,43 +53,30 @@
std::string chksum;
};
std::vector<CompToURL> kCompToURL = {
- {"avc",
- "bbb_avc_176x144_300kbps_60fps.h264", "bbb_avc_176x144_300kbps_60fps.info",
- "bbb_avc_176x144_300kbps_60fps_chksum.md5"},
- {"avc",
- "bbb_avc_640x360_768kbps_30fps.h264", "bbb_avc_640x360_768kbps_30fps.info",
- "bbb_avc_640x360_768kbps_30fps_chksum.md5"},
- {"hevc",
- "bbb_hevc_176x144_176kbps_60fps.hevc", "bbb_hevc_176x144_176kbps_60fps.info",
- "bbb_hevc_176x144_176kbps_60fps_chksum.md5"},
- {"hevc",
- "bbb_hevc_640x360_1600kbps_30fps.hevc", "bbb_hevc_640x360_1600kbps_30fps.info",
- "bbb_hevc_640x360_1600kbps_30fps_chksum.md5"},
- {"mpeg2",
- "bbb_mpeg2_176x144_105kbps_25fps.m2v", "bbb_mpeg2_176x144_105kbps_25fps.info", ""},
- {"mpeg2",
- "bbb_mpeg2_352x288_1mbps_60fps.m2v","bbb_mpeg2_352x288_1mbps_60fps.info", ""},
- {"3gpp",
- "bbb_h263_352x288_300kbps_12fps.h263", "bbb_h263_352x288_300kbps_12fps.info", ""},
- {"mp4v-es",
- "bbb_mpeg4_352x288_512kbps_30fps.m4v", "bbb_mpeg4_352x288_512kbps_30fps.info", ""},
- {"vp8",
- "bbb_vp8_176x144_240kbps_60fps.vp8", "bbb_vp8_176x144_240kbps_60fps.info", ""},
- {"vp8",
- "bbb_vp8_640x360_2mbps_30fps.vp8", "bbb_vp8_640x360_2mbps_30fps.info",
- "bbb_vp8_640x360_2mbps_30fps_chksm.md5"},
- {"vp9",
- "bbb_vp9_176x144_285kbps_60fps.vp9", "bbb_vp9_176x144_285kbps_60fps.info", ""},
- {"vp9",
- "bbb_vp9_640x360_1600kbps_30fps.vp9", "bbb_vp9_640x360_1600kbps_30fps.info",
- "bbb_vp9_640x360_1600kbps_30fps_chksm.md5"},
- {"vp9",
- "bbb_vp9_704x480_280kbps_24fps_altref_2.vp9",
- "bbb_vp9_704x480_280kbps_24fps_altref_2.info", ""},
- {"av01",
- "bbb_av1_640_360.av1", "bbb_av1_640_360.info", "bbb_av1_640_360_chksum.md5"},
- {"av01",
- "bbb_av1_176_144.av1", "bbb_av1_176_144.info", "bbb_av1_176_144_chksm.md5"},
+ {"avc", "bbb_avc_176x144_300kbps_60fps.h264", "bbb_avc_176x144_300kbps_60fps.info",
+ "bbb_avc_176x144_300kbps_60fps_chksum.md5"},
+ {"avc", "bbb_avc_640x360_768kbps_30fps.h264", "bbb_avc_640x360_768kbps_30fps.info",
+ "bbb_avc_640x360_768kbps_30fps_chksum.md5"},
+ {"hevc", "bbb_hevc_176x144_176kbps_60fps.hevc", "bbb_hevc_176x144_176kbps_60fps.info",
+ "bbb_hevc_176x144_176kbps_60fps_chksum.md5"},
+ {"hevc", "bbb_hevc_640x360_1600kbps_30fps.hevc", "bbb_hevc_640x360_1600kbps_30fps.info",
+ "bbb_hevc_640x360_1600kbps_30fps_chksum.md5"},
+ {"mpeg2", "bbb_mpeg2_176x144_105kbps_25fps.m2v", "bbb_mpeg2_176x144_105kbps_25fps.info",
+ ""},
+ {"mpeg2", "bbb_mpeg2_352x288_1mbps_60fps.m2v", "bbb_mpeg2_352x288_1mbps_60fps.info", ""},
+ {"3gpp", "bbb_h263_352x288_300kbps_12fps.h263", "bbb_h263_352x288_300kbps_12fps.info", ""},
+ {"mp4v-es", "bbb_mpeg4_352x288_512kbps_30fps.m4v", "bbb_mpeg4_352x288_512kbps_30fps.info",
+ ""},
+ {"vp8", "bbb_vp8_176x144_240kbps_60fps.vp8", "bbb_vp8_176x144_240kbps_60fps.info", ""},
+ {"vp8", "bbb_vp8_640x360_2mbps_30fps.vp8", "bbb_vp8_640x360_2mbps_30fps.info",
+ "bbb_vp8_640x360_2mbps_30fps_chksm.md5"},
+ {"vp9", "bbb_vp9_176x144_285kbps_60fps.vp9", "bbb_vp9_176x144_285kbps_60fps.info", ""},
+ {"vp9", "bbb_vp9_640x360_1600kbps_30fps.vp9", "bbb_vp9_640x360_1600kbps_30fps.info",
+ "bbb_vp9_640x360_1600kbps_30fps_chksm.md5"},
+ {"vp9", "bbb_vp9_704x480_280kbps_24fps_altref_2.vp9",
+ "bbb_vp9_704x480_280kbps_24fps_altref_2.info", ""},
+ {"av01", "bbb_av1_640_360.av1", "bbb_av1_640_360.info", "bbb_av1_640_360_chksum.md5"},
+ {"av01", "bbb_av1_176_144.av1", "bbb_av1_176_144.info", "bbb_av1_176_144_chksm.md5"},
};
class LinearBuffer : public C2Buffer {
@@ -251,8 +239,7 @@
if (!codecConfig && !work->worklets.front()->output.buffers.empty()) {
if (mReorderDepth < 0) {
C2PortReorderBufferDepthTuning::output reorderBufferDepth;
- mComponent->query({&reorderBufferDepth}, {}, C2_MAY_BLOCK,
- nullptr);
+ mComponent->query({&reorderBufferDepth}, {}, C2_MAY_BLOCK, nullptr);
mReorderDepth = reorderBufferDepth.value;
if (mReorderDepth > 0) {
// TODO: Add validation for reordered output
@@ -333,9 +320,8 @@
}
};
-class Codec2VideoDecHidlTest
- : public Codec2VideoDecHidlTestBase,
- public ::testing::WithParamInterface<std::tuple<std::string, std::string>> {
+class Codec2VideoDecHidlTest : public Codec2VideoDecHidlTestBase,
+ public ::testing::WithParamInterface<TestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -541,10 +527,8 @@
return false;
}
-class Codec2VideoDecDecodeTest
- : public Codec2VideoDecHidlTestBase,
- public ::testing::WithParamInterface<
- std::tuple<std::string, std::string, std::string, std::string>> {
+class Codec2VideoDecDecodeTest : public Codec2VideoDecHidlTestBase,
+ public ::testing::WithParamInterface<DecodeTestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -556,8 +540,8 @@
description("Decodes input file");
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
- uint32_t streamIndex = std::stoi(std::get<2>(GetParam()));
- bool signalEOS = !std::get<2>(GetParam()).compare("true");
+ uint32_t streamIndex = std::get<2>(GetParam());
+ bool signalEOS = std::get<3>(GetParam());
mTimestampDevTest = true;
char mURL[512], info[512], chksum[512];
@@ -657,8 +641,8 @@
description("Adaptive Decode Test");
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
if (!(strcasestr(mMime.c_str(), "avc") || strcasestr(mMime.c_str(), "hevc") ||
- strcasestr(mMime.c_str(), "vp8") || strcasestr(mMime.c_str(), "vp9") ||
- strcasestr(mMime.c_str(), "mpeg2"))) {
+ strcasestr(mMime.c_str(), "vp8") || strcasestr(mMime.c_str(), "vp9") ||
+ strcasestr(mMime.c_str(), "mpeg2"))) {
return;
}
@@ -987,9 +971,8 @@
}
}
-class Codec2VideoDecCsdInputTests
- : public Codec2VideoDecHidlTestBase,
- public ::testing::WithParamInterface<std::tuple<std::string, std::string, std::string>> {
+class Codec2VideoDecCsdInputTests : public Codec2VideoDecHidlTestBase,
+ public ::testing::WithParamInterface<CsdFlushTestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -1022,7 +1005,7 @@
bool flushedDecoder = false;
bool signalEOS = false;
bool keyFrame = false;
- bool flushCsd = !std::get<2>(GetParam()).compare("true");
+ bool flushCsd = std::get<2>(GetParam());
ALOGV("sending %d csd data ", numCsds);
int framesToDecode = numCsds;
@@ -1092,16 +1075,16 @@
}
INSTANTIATE_TEST_SUITE_P(PerInstance, Codec2VideoDecHidlTest, testing::ValuesIn(kTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
// DecodeTest with StreamIndex and EOS / No EOS
INSTANTIATE_TEST_SUITE_P(StreamIndexAndEOS, Codec2VideoDecDecodeTest,
testing::ValuesIn(kDecodeTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
INSTANTIATE_TEST_SUITE_P(CsdInputs, Codec2VideoDecCsdInputTests,
testing::ValuesIn(kCsdFlushTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
} // anonymous namespace
@@ -1111,22 +1094,22 @@
kTestParameters = getTestParameters(C2Component::DOMAIN_VIDEO, C2Component::KIND_DECODER);
for (auto params : kTestParameters) {
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "false"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 0, false));
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "0", "true"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 0, true));
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "1", "false"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 1, false));
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "1", "true"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 1, true));
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "2", "false"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 2, false));
kDecodeTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "2", "true"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 2, true));
kCsdFlushTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "true"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), true));
kCsdFlushTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "false"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), false));
}
::testing::InitGoogleTest(&argc, argv);
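
For reference, a minimal standalone sketch (standard C++ only, names invented for illustration) of the typed-parameter pattern the decoder tests switch to above: the stream index and EOS flag travel as uint32_t/bool inside the tuple, so the test body no longer needs std::stoi() or string comparisons.

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <tuple>
    #include <vector>

    using DecodeParams = std::tuple<std::string, std::string, uint32_t, bool>;

    int main() {
        std::vector<DecodeParams> params;
        // One entry per (instance, component) pair and per stream-index/EOS combination.
        for (uint32_t streamIndex : {0u, 1u, 2u}) {
            for (bool signalEOS : {false, true}) {
                params.emplace_back("software", "c2.android.avc.decoder", streamIndex, signalEOS);
            }
        }
        for (const auto& p : params) {
            std::cout << std::get<1>(p) << " stream=" << std::get<2>(p)
                      << " eos=" << std::boolalpha << std::get<3>(p) << "\n";
        }
        return 0;
    }
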
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
index 7e35de7..23ceff4 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
@@ -41,10 +41,11 @@
: C2Buffer({block->share(C2Rect(block->width(), block->height()), ::C2Fence())}) {}
};
-static std::vector<std::tuple<std::string, std::string, std::string, std::string, std::string>>
- kEncodeTestParameters;
-static std::vector<std::tuple<std::string, std::string, std::string, std::string>>
- kEncodeResolutionTestParameters;
+using EncodeTestParameters = std::tuple<std::string, std::string, bool, bool, bool>;
+static std::vector<EncodeTestParameters> kEncodeTestParameters;
+
+using EncodeResolutionTestParameters = std::tuple<std::string, std::string, int32_t, int32_t>;
+static std::vector<EncodeResolutionTestParameters> kEncodeResolutionTestParameters;
namespace {
@@ -75,26 +76,13 @@
mGraphicPool = std::make_shared<C2PooledBlockPool>(mGraphicAllocator, mBlockPoolId++);
ASSERT_NE(mGraphicPool, nullptr);
- mCompName = unknown_comp;
- struct StringToName {
- const char* Name;
- standardComp CompName;
- };
+ std::vector<std::unique_ptr<C2Param>> queried;
+ mComponent->query({}, {C2PortMediaTypeSetting::output::PARAM_TYPE}, C2_DONT_BLOCK,
+ &queried);
+ ASSERT_GT(queried.size(), 0);
- const StringToName kStringToName[] = {
- {"h263", h263}, {"avc", avc}, {"mpeg4", mpeg4},
- {"hevc", hevc}, {"vp8", vp8}, {"vp9", vp9},
- };
-
- const size_t kNumStringToName = sizeof(kStringToName) / sizeof(kStringToName[0]);
-
- // Find the component type
- for (size_t i = 0; i < kNumStringToName; ++i) {
- if (strcasestr(mComponentName.c_str(), kStringToName[i].Name)) {
- mCompName = kStringToName[i].CompName;
- break;
- }
- }
+ mMime = ((C2PortMediaTypeSetting::output*)queried[0].get())->m.value;
+ std::cout << "mime : " << mMime << "\n";
mEos = false;
mCsd = false;
mConfigBPictures = false;
@@ -103,7 +91,6 @@
mTimestampUs = 0u;
mOutputSize = 0u;
mTimestampDevTest = false;
- if (mCompName == unknown_comp) mDisableTest = true;
C2SecureModeTuning secureModeTuning{};
mComponent->query({&secureModeTuning}, {}, C2_MAY_BLOCK, nullptr);
@@ -185,16 +172,7 @@
}
}
- enum standardComp {
- h263,
- avc,
- mpeg4,
- hevc,
- vp8,
- vp9,
- unknown_comp,
- };
-
+ std::string mMime;
std::string mInstanceName;
std::string mComponentName;
bool mEos;
@@ -202,7 +180,6 @@
bool mDisableTest;
bool mConfigBPictures;
bool mTimestampDevTest;
- standardComp mCompName;
uint32_t mFramesReceived;
uint32_t mFailedWorkReceived;
uint64_t mTimestampUs;
@@ -229,9 +206,8 @@
}
};
-class Codec2VideoEncHidlTest
- : public Codec2VideoEncHidlTestBase,
- public ::testing::WithParamInterface<std::tuple<std::string, std::string>> {
+class Codec2VideoEncHidlTest : public Codec2VideoEncHidlTestBase,
+ public ::testing::WithParamInterface<TestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -239,7 +215,7 @@
};
void validateComponent(const std::shared_ptr<android::Codec2Client::Component>& component,
- Codec2VideoEncHidlTest::standardComp compName, bool& disableTest) {
+ bool& disableTest) {
// Validate its a C2 Component
if (component->getName().find("c2") == std::string::npos) {
ALOGE("Not a c2 component");
@@ -266,13 +242,6 @@
return;
}
}
-
- // Validates component name
- if (compName == Codec2VideoEncHidlTest::unknown_comp) {
- ALOGE("Component InValid");
- disableTest = true;
- return;
- }
ALOGV("Component Valid");
}
@@ -403,14 +372,12 @@
TEST_P(Codec2VideoEncHidlTest, validateCompName) {
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
ALOGV("Checks if the given component is a valid video component");
- validateComponent(mComponent, mCompName, mDisableTest);
+ validateComponent(mComponent, mDisableTest);
ASSERT_EQ(mDisableTest, false);
}
-class Codec2VideoEncEncodeTest
- : public Codec2VideoEncHidlTestBase,
- public ::testing::WithParamInterface<
- std::tuple<std::string, std::string, std::string, std::string, std::string>> {
+class Codec2VideoEncEncodeTest : public Codec2VideoEncHidlTestBase,
+ public ::testing::WithParamInterface<EncodeTestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -424,10 +391,10 @@
char mURL[512];
int32_t nWidth = ENC_DEFAULT_FRAME_WIDTH;
int32_t nHeight = ENC_DEFAULT_FRAME_HEIGHT;
- bool signalEOS = !std::get<2>(GetParam()).compare("true");
+ bool signalEOS = std::get<3>(GetParam());
// Send an empty frame to receive CSD data from encoder.
- bool sendEmptyFirstFrame = !std::get<3>(GetParam()).compare("true");
- mConfigBPictures = !std::get<4>(GetParam()).compare("true");
+ bool sendEmptyFirstFrame = std::get<3>(GetParam());
+ mConfigBPictures = std::get<4>(GetParam());
strcpy(mURL, sResourceDir.c_str());
GetURLForComponent(mURL);
@@ -515,9 +482,9 @@
ASSERT_TRUE(false);
}
- if (mCompName == vp8 || mCompName == h263) {
+ if ((mMime.find("vp8") != std::string::npos) || (mMime.find("3gpp") != std::string::npos)) {
ASSERT_FALSE(mCsd) << "CSD Buffer not expected";
- } else if (mCompName != vp9) {
+ } else if (mMime.find("vp9") == std::string::npos) {
ASSERT_TRUE(mCsd) << "CSD Buffer not received";
}
@@ -695,8 +662,7 @@
class Codec2VideoEncResolutionTest
: public Codec2VideoEncHidlTestBase,
- public ::testing::WithParamInterface<
- std::tuple<std::string, std::string, std::string, std::string>> {
+ public ::testing::WithParamInterface<EncodeResolutionTestParameters> {
void getParams() {
mInstanceName = std::get<0>(GetParam());
mComponentName = std::get<1>(GetParam());
@@ -708,8 +674,8 @@
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
std::ifstream eleStream;
- int32_t nWidth = std::stoi(std::get<2>(GetParam()));
- int32_t nHeight = std::stoi(std::get<3>(GetParam()));
+ int32_t nWidth = std::get<2>(GetParam());
+ int32_t nHeight = std::get<3>(GetParam());
ALOGD("Trying encode for width %d height %d", nWidth, nHeight);
mEos = false;
@@ -741,14 +707,16 @@
}
INSTANTIATE_TEST_SUITE_P(PerInstance, Codec2VideoEncHidlTest, testing::ValuesIn(kTestParameters),
- android::hardware::PrintInstanceTupleNameToString<>);
+ PrintInstanceTupleNameToString<>);
INSTANTIATE_TEST_SUITE_P(NonStdSizes, Codec2VideoEncResolutionTest,
- ::testing::ValuesIn(kEncodeResolutionTestParameters));
+ ::testing::ValuesIn(kEncodeResolutionTestParameters),
+ PrintInstanceTupleNameToString<>);
// EncodeTest with EOS / No EOS
INSTANTIATE_TEST_SUITE_P(EncodeTestwithEOS, Codec2VideoEncEncodeTest,
- ::testing::ValuesIn(kEncodeTestParameters));
+ ::testing::ValuesIn(kEncodeTestParameters),
+ PrintInstanceTupleNameToString<>);
TEST_P(Codec2VideoEncHidlTest, AdaptiveBitrateTest) {
description("Encodes input file for different bitrates");
@@ -842,27 +810,23 @@
parseArgs(argc, argv);
kTestParameters = getTestParameters(C2Component::DOMAIN_VIDEO, C2Component::KIND_ENCODER);
for (auto params : kTestParameters) {
- constexpr char const* kBoolString[] = { "false", "true" };
for (size_t i = 0; i < 1 << 3; ++i) {
kEncodeTestParameters.push_back(std::make_tuple(
- std::get<0>(params), std::get<1>(params),
- kBoolString[i & 1],
- kBoolString[(i >> 1) & 1],
- kBoolString[(i >> 2) & 1]));
+ std::get<0>(params), std::get<1>(params), i & 1, (i >> 1) & 1, (i >> 2) & 1));
}
kEncodeResolutionTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "52", "18"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 52, 18));
kEncodeResolutionTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "365", "365"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 365, 365));
kEncodeResolutionTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "484", "362"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 484, 362));
kEncodeResolutionTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "244", "488"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 244, 488));
kEncodeResolutionTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "852", "608"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 852, 608));
kEncodeResolutionTestParameters.push_back(
- std::make_tuple(std::get<0>(params), std::get<1>(params), "1400", "442"));
+ std::make_tuple(std::get<0>(params), std::get<1>(params), 1400, 442));
}
::testing::InitGoogleTest(&argc, argv);
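
A small standalone illustration of the mime-string checks that replace the standardComp enum in the encoder tests above; in the real test the mime value is queried via C2PortMediaTypeSetting::output, here it is hard-coded.

    #include <iostream>
    #include <string>

    static bool mimeContains(const std::string& mime, const char* token) {
        return mime.find(token) != std::string::npos;
    }

    int main() {
        const std::string mime = "video/avc";  // queried from the component in the real test
        if (mimeContains(mime, "vp8") || mimeContains(mime, "3gpp")) {
            std::cout << "CSD buffer not expected\n";
        } else if (!mimeContains(mime, "vp9")) {
            std::cout << "CSD buffer required\n";   // avc, hevc, mpeg4, ...
        } else {
            std::cout << "CSD optional\n";
        }
        return 0;
    }
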
diff --git a/media/codec2/hidl/client/client.cpp b/media/codec2/hidl/client/client.cpp
index 0296004..d49141c 100644
--- a/media/codec2/hidl/client/client.cpp
+++ b/media/codec2/hidl/client/client.cpp
@@ -1503,8 +1503,7 @@
bqId = 0;
mOutputBufferQueue->configure(nullIgbp, generation, 0, nullptr);
} else {
- mOutputBufferQueue->configure(surface, generation, bqId,
- mBase1_2 ? &syncObj : nullptr);
+ mOutputBufferQueue->configure(surface, generation, bqId, nullptr);
}
ALOGD("surface generation remote change %u HAL ver: %s",
generation, syncObj ? "1.2" : "1.0");
diff --git a/media/codec2/hidl/plugin/FilterWrapper.cpp b/media/codec2/hidl/plugin/FilterWrapper.cpp
index 0b38bc1..bed8aeb 100644
--- a/media/codec2/hidl/plugin/FilterWrapper.cpp
+++ b/media/codec2/hidl/plugin/FilterWrapper.cpp
@@ -19,7 +19,6 @@
#include <android-base/logging.h>
#include <set>
-#include <sstream>
#include <dlfcn.h>
@@ -383,6 +382,9 @@
// Configure the next interface with the params.
std::vector<C2Param *> configParams;
for (size_t i = 0; i < heapParams.size(); ++i) {
+ if (!heapParams[i]) {
+ continue;
+ }
if (heapParams[i]->forStream()) {
heapParams[i] = C2Param::CopyAsStream(
*heapParams[i], false /* output */, heapParams[i]->stream());
@@ -782,10 +784,7 @@
if (C2_OK != mStore->createComponent(filter.traits.name, &comp)) {
return {};
}
- if (C2_OK != mStore->createInterface(filter.traits.name, &intf)) {
- return {};
- }
- filters.push_back({comp, intf, filter.traits, filter.desc});
+ filters.push_back({comp, comp->intf(), filter.traits, filter.desc});
}
return filters;
}
@@ -869,7 +868,7 @@
}
std::vector<Component> filters = createFilters();
std::shared_ptr wrapped = std::make_shared<WrappedDecoder>(
- comp, std::move(filters), weak_from_this());
+ comp, std::vector(filters), weak_from_this());
{
std::unique_lock lock(mWrappedComponentsMutex);
std::vector<std::weak_ptr<const C2Component>> &components =
diff --git a/media/codec2/hidl/services/seccomp_policy/android.hardware.media.c2@1.2-default-arm64.policy b/media/codec2/hidl/services/seccomp_policy/android.hardware.media.c2@1.2-default-arm64.policy
index f701987..5d0284f 100644
--- a/media/codec2/hidl/services/seccomp_policy/android.hardware.media.c2@1.2-default-arm64.policy
+++ b/media/codec2/hidl/services/seccomp_policy/android.hardware.media.c2@1.2-default-arm64.policy
@@ -35,7 +35,7 @@
# on ARM is statically loaded at 0xffff 0000. See
# http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0211h/Babfeega.html
# for more details.
-mremap: arg3 == 3
+mremap: arg3 == 3 || arg3 == MREMAP_MAYMOVE
munmap: 1
prctl: 1
writev: 1
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 4ae4c8e..15d2989 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -997,7 +997,15 @@
// needed for decoders.
if (!(config->mDomain & Config::IS_ENCODER)) {
if (surface == nullptr) {
- format = flexPixelFormat.value_or(COLOR_FormatYUV420Flexible);
+ const char *prefix = "";
+ if (flexSemiPlanarPixelFormat) {
+ format = COLOR_FormatYUV420SemiPlanar;
+ prefix = "semi-";
+ } else {
+ format = COLOR_FormatYUV420Planar;
+ }
+ ALOGD("Client requested ByteBuffer mode decoder w/o color format set: "
+ "using default %splanar color format", prefix);
} else {
format = COLOR_FormatSurface;
}
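
The default-format choice above, reduced to a sketch; the numeric values are the public MediaCodec COLOR_Format* constants, and the boolean stands in for "the component advertises the semi-planar flexible variant".

    #include <cstdint>
    #include <cstdio>

    constexpr int32_t COLOR_FormatYUV420Planar     = 19;
    constexpr int32_t COLOR_FormatYUV420SemiPlanar = 21;

    static int32_t defaultByteBufferFormat(bool flexSemiPlanarSupported) {
        // Decoders in ByteBuffer mode without an explicit color format fall back to a
        // planar or semi-planar default rather than the flexible format.
        return flexSemiPlanarSupported ? COLOR_FormatYUV420SemiPlanar
                                       : COLOR_FormatYUV420Planar;
    }

    int main() {
        printf("default format: %d\n", defaultByteBufferFormat(true));
        return 0;
    }
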
@@ -1790,17 +1798,19 @@
}
status_t CCodec::setSurface(const sp<Surface> &surface) {
- Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
- const std::unique_ptr<Config> &config = *configLocked;
- if (config->mTunneled && config->mSidebandHandle != nullptr) {
- sp<ANativeWindow> nativeWindow = static_cast<ANativeWindow *>(surface.get());
- status_t err = native_window_set_sideband_stream(
- nativeWindow.get(),
- const_cast<native_handle_t *>(config->mSidebandHandle->handle()));
- if (err != OK) {
- ALOGE("NativeWindow(%p) native_window_set_sideband_stream(%p) failed! (err %d).",
- nativeWindow.get(), config->mSidebandHandle->handle(), err);
- return err;
+ {
+ Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
+ const std::unique_ptr<Config> &config = *configLocked;
+ if (config->mTunneled && config->mSidebandHandle != nullptr) {
+ sp<ANativeWindow> nativeWindow = static_cast<ANativeWindow *>(surface.get());
+ status_t err = native_window_set_sideband_stream(
+ nativeWindow.get(),
+ const_cast<native_handle_t *>(config->mSidebandHandle->handle()));
+ if (err != OK) {
+ ALOGE("NativeWindow(%p) native_window_set_sideband_stream(%p) failed! (err %d).",
+ nativeWindow.get(), config->mSidebandHandle->handle(), err);
+ return err;
+ }
}
}
return mChannel->setSurface(surface);
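
The setSurface() change above narrows the scope of the config lock so it is released before calling into the buffer channel; a generic sketch of the same pattern, with std::mutex standing in for Mutexed<...> and illustrative names.

    #include <iostream>
    #include <mutex>

    std::mutex gConfigLock;
    bool gTunneledWithSideband = true;   // stands in for mTunneled && mSidebandHandle

    int setSurface(int surfaceId) {
        {
            std::lock_guard<std::mutex> lock(gConfigLock);  // held only while reading config
            if (gTunneledWithSideband) {
                std::cout << "configure sideband stream for surface " << surfaceId << "\n";
            }
        }                                                   // released before the next call
        std::cout << "channel->setSurface(" << surfaceId << ")\n";
        return 0;
    }

    int main() { return setSurface(7); }
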
@@ -1935,6 +1945,12 @@
params->removeEntryAt(params->findEntryByName(KEY_BIT_RATE));
}
+ int32_t syncId = 0;
+ if (params->findInt32("audio-hw-sync", &syncId)
+ || params->findInt32("hw-av-sync-id", &syncId)) {
+ configureTunneledVideoPlayback(comp, nullptr, params);
+ }
+
Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
const std::unique_ptr<Config> &config = *configLocked;
@@ -2135,80 +2151,92 @@
}
// handle configuration changes in work done
- Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
- const std::unique_ptr<Config> &config = *configLocked;
- Config::Watcher<C2StreamInitDataInfo::output> initData =
- config->watch<C2StreamInitDataInfo::output>();
- if (!work->worklets.empty()
- && (work->worklets.front()->output.flags
- & C2FrameData::FLAG_DISCARD_FRAME) == 0) {
+ std::unique_ptr<C2Param> initData;
+ sp<AMessage> outputFormat = nullptr;
+ {
+ Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
+ const std::unique_ptr<Config> &config = *configLocked;
+ Config::Watcher<C2StreamInitDataInfo::output> initDataWatcher =
+ config->watch<C2StreamInitDataInfo::output>();
+ if (!work->worklets.empty()
+ && (work->worklets.front()->output.flags
+ & C2FrameData::FLAG_DISCARD_FRAME) == 0) {
- // copy buffer info to config
- std::vector<std::unique_ptr<C2Param>> updates;
- for (const std::unique_ptr<C2Param> &param
- : work->worklets.front()->output.configUpdate) {
- updates.push_back(C2Param::Copy(*param));
- }
- unsigned stream = 0;
- for (const std::shared_ptr<C2Buffer> &buf : work->worklets.front()->output.buffers) {
- for (const std::shared_ptr<const C2Info> &info : buf->info()) {
- // move all info into output-stream #0 domain
- updates.emplace_back(C2Param::CopyAsStream(*info, true /* output */, stream));
+ // copy buffer info to config
+ std::vector<std::unique_ptr<C2Param>> updates;
+ for (const std::unique_ptr<C2Param> &param
+ : work->worklets.front()->output.configUpdate) {
+ updates.push_back(C2Param::Copy(*param));
+ }
+ unsigned stream = 0;
+ std::vector<std::shared_ptr<C2Buffer>> &outputBuffers =
+ work->worklets.front()->output.buffers;
+ for (const std::shared_ptr<C2Buffer> &buf : outputBuffers) {
+ for (const std::shared_ptr<const C2Info> &info : buf->info()) {
+ // move all info into output-stream #0 domain
+ updates.emplace_back(
+ C2Param::CopyAsStream(*info, true /* output */, stream));
+ }
+
+ const std::vector<C2ConstGraphicBlock> blocks = buf->data().graphicBlocks();
+ // for now only do the first block
+ if (!blocks.empty()) {
+ // ALOGV("got output buffer with crop %u,%u+%u,%u and size %u,%u",
+ // block.crop().left, block.crop().top,
+ // block.crop().width, block.crop().height,
+ // block.width(), block.height());
+ const C2ConstGraphicBlock &block = blocks[0];
+ updates.emplace_back(new C2StreamCropRectInfo::output(
+ stream, block.crop()));
+ updates.emplace_back(new C2StreamPictureSizeInfo::output(
+ stream, block.crop().width, block.crop().height));
+ }
+ ++stream;
}
- const std::vector<C2ConstGraphicBlock> blocks = buf->data().graphicBlocks();
- // for now only do the first block
- if (!blocks.empty()) {
- // ALOGV("got output buffer with crop %u,%u+%u,%u and size %u,%u",
- // block.crop().left, block.crop().top,
- // block.crop().width, block.crop().height,
- // block.width(), block.height());
- const C2ConstGraphicBlock &block = blocks[0];
- updates.emplace_back(new C2StreamCropRectInfo::output(stream, block.crop()));
- updates.emplace_back(new C2StreamPictureSizeInfo::output(
- stream, block.crop().width, block.crop().height));
- }
- ++stream;
- }
+ sp<AMessage> oldFormat = config->mOutputFormat;
+ config->updateConfiguration(updates, config->mOutputDomain);
+ RevertOutputFormatIfNeeded(oldFormat, config->mOutputFormat);
- sp<AMessage> outputFormat = config->mOutputFormat;
- config->updateConfiguration(updates, config->mOutputDomain);
- RevertOutputFormatIfNeeded(outputFormat, config->mOutputFormat);
-
- // copy standard infos to graphic buffers if not already present (otherwise, we
- // may overwrite the actual intermediate value with a final value)
- stream = 0;
- const static C2Param::Index stdGfxInfos[] = {
- C2StreamRotationInfo::output::PARAM_TYPE,
- C2StreamColorAspectsInfo::output::PARAM_TYPE,
- C2StreamDataSpaceInfo::output::PARAM_TYPE,
- C2StreamHdrStaticInfo::output::PARAM_TYPE,
- C2StreamHdr10PlusInfo::output::PARAM_TYPE,
- C2StreamPixelAspectRatioInfo::output::PARAM_TYPE,
- C2StreamSurfaceScalingInfo::output::PARAM_TYPE
- };
- for (const std::shared_ptr<C2Buffer> &buf : work->worklets.front()->output.buffers) {
- if (buf->data().graphicBlocks().size()) {
- for (C2Param::Index ix : stdGfxInfos) {
- if (!buf->hasInfo(ix)) {
- const C2Param *param =
- config->getConfigParameterValue(ix.withStream(stream));
- if (param) {
- std::shared_ptr<C2Param> info(C2Param::Copy(*param));
- buf->setInfo(std::static_pointer_cast<C2Info>(info));
+ // copy standard infos to graphic buffers if not already present (otherwise, we
+ // may overwrite the actual intermediate value with a final value)
+ stream = 0;
+ const static C2Param::Index stdGfxInfos[] = {
+ C2StreamRotationInfo::output::PARAM_TYPE,
+ C2StreamColorAspectsInfo::output::PARAM_TYPE,
+ C2StreamDataSpaceInfo::output::PARAM_TYPE,
+ C2StreamHdrStaticInfo::output::PARAM_TYPE,
+ C2StreamHdr10PlusInfo::output::PARAM_TYPE,
+ C2StreamPixelAspectRatioInfo::output::PARAM_TYPE,
+ C2StreamSurfaceScalingInfo::output::PARAM_TYPE
+ };
+ for (const std::shared_ptr<C2Buffer> &buf : outputBuffers) {
+ if (buf->data().graphicBlocks().size()) {
+ for (C2Param::Index ix : stdGfxInfos) {
+ if (!buf->hasInfo(ix)) {
+ const C2Param *param =
+ config->getConfigParameterValue(ix.withStream(stream));
+ if (param) {
+ std::shared_ptr<C2Param> info(C2Param::Copy(*param));
+ buf->setInfo(std::static_pointer_cast<C2Info>(info));
+ }
}
}
}
+ ++stream;
}
- ++stream;
}
- }
- if (config->mInputSurface) {
- config->mInputSurface->onInputBufferDone(work->input.ordinal.frameIndex);
+ if (config->mInputSurface) {
+ config->mInputSurface->onInputBufferDone(work->input.ordinal.frameIndex);
+ }
+ if (initDataWatcher.hasChanged()) {
+ initData = C2Param::Copy(*initDataWatcher.update().get());
+ }
+ outputFormat = config->mOutputFormat;
}
mChannel->onWorkDone(
- std::move(work), config->mOutputFormat,
- initData.hasChanged() ? initData.update().get() : nullptr);
+ std::move(work), outputFormat,
+ initData ? (C2StreamInitDataInfo::output *)initData.get() : nullptr);
break;
}
case kWhatWatch: {
@@ -2258,6 +2286,10 @@
return UNKNOWN_ERROR;
}
+ if (sidebandHandle == nullptr) {
+ return OK;
+ }
+
std::vector<std::unique_ptr<C2Param>> params;
c2err = comp->query({}, {C2PortTunnelHandleTuning::output::PARAM_TYPE}, C2_DONT_BLOCK, &params);
if (c2err == C2_OK && params.size() == 1u) {
@@ -2289,9 +2321,13 @@
pendingDeadline = true;
}
}
- Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
- const std::unique_ptr<Config> &config = *configLocked;
- if (config->mTunneled == false && name.empty()) {
+ bool tunneled = false;
+ {
+ Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
+ const std::unique_ptr<Config> &config = *configLocked;
+ tunneled = config->mTunneled;
+ }
+ if (!tunneled && name.empty()) {
constexpr std::chrono::steady_clock::duration kWorkDurationThreshold = 3s;
std::chrono::steady_clock::duration elapsed = mChannel->elapsed();
if (elapsed >= kWorkDurationThreshold) {
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index c4f9d84..d0c1357 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -1368,7 +1368,7 @@
// about buffers from the previous generation do not interfere with the
// newly initialized pipeline capacity.
- {
+ if (inputFormat || outputFormat) {
Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
watcher->inputDelay(inputDelayValue)
.pipelineDelay(pipelineDelayValue)
@@ -1468,14 +1468,14 @@
void CCodecBufferChannel::stop() {
mSync.stop();
mFirstValidFrameIndex = mFrameIndex.load(std::memory_order_relaxed);
- if (mInputSurface != nullptr) {
- mInputSurface.reset();
- }
- mPipelineWatcher.lock()->flush();
}
void CCodecBufferChannel::reset() {
stop();
+ if (mInputSurface != nullptr) {
+ mInputSurface.reset();
+ }
+ mPipelineWatcher.lock()->flush();
{
Mutexed<Input>::Locked input(mInput);
input->buffers.reset(new DummyInputBuffers(""));
@@ -1503,8 +1503,10 @@
void CCodecBufferChannel::flush(const std::list<std::unique_ptr<C2Work>> &flushedWork) {
ALOGV("[%s] flush", mName);
+ std::vector<uint64_t> indices;
std::list<std::unique_ptr<C2Work>> configs;
for (const std::unique_ptr<C2Work> &work : flushedWork) {
+ indices.push_back(work->input.ordinal.frameIndex.peeku());
if (!(work->input.flags & C2FrameData::FLAG_CODEC_CONFIG)) {
continue;
}
@@ -1517,6 +1519,7 @@
std::unique_ptr<C2Work> copy(new C2Work);
copy->input.flags = C2FrameData::flags_t(work->input.flags | C2FrameData::FLAG_DROP_FRAME);
copy->input.ordinal = work->input.ordinal;
+ copy->input.ordinal.frameIndex = mFrameIndex++;
copy->input.buffers.insert(
copy->input.buffers.begin(),
work->input.buffers.begin(),
@@ -1545,7 +1548,12 @@
output->buffers->flushStash();
}
}
- mPipelineWatcher.lock()->flush();
+ {
+ Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
+ for (uint64_t index : indices) {
+ watcher->onWorkDone(index);
+ }
+ }
}
void CCodecBufferChannel::onWorkDone(
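
Standalone model of the flush bookkeeping change above: instead of wiping the whole pipeline tracker, only the frame indices actually returned by the flush are removed. The tracker here is a bare std::map stand-in for PipelineWatcher.

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <vector>

    struct PipelineTracker {
        std::map<uint64_t, int> framesInPipeline;          // frameIndex -> client state
        void onWorkDone(uint64_t index) { framesInPipeline.erase(index); }
    };

    int main() {
        PipelineTracker watcher;
        for (uint64_t i = 0; i < 5; ++i) watcher.framesInPipeline[i] = 0;

        std::vector<uint64_t> flushedIndices = {1, 3};      // gathered from the flushed work
        for (uint64_t index : flushedIndices) watcher.onWorkDone(index);

        std::cout << "frames still pending: " << watcher.framesInPipeline.size() << "\n";  // 3
        return 0;
    }
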
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index ad28545..27e87e6 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -362,7 +362,10 @@
.limitTo(D::OUTPUT & D::READ));
add(ConfigMapper(KEY_BIT_RATE, C2_PARAMKEY_BITRATE, "value")
- .limitTo(D::ENCODER & D::OUTPUT));
+ .limitTo(D::ENCODER & D::CODED));
+ // Some audio decoders require bitrate information to be set
+ add(ConfigMapper(KEY_BIT_RATE, C2_PARAMKEY_BITRATE, "value")
+ .limitTo(D::AUDIO & D::DECODER & D::CODED));
// we also need to put the bitrate in the max bitrate field
add(ConfigMapper(KEY_MAX_BIT_RATE, C2_PARAMKEY_BITRATE, "value")
.limitTo(D::ENCODER & D::READ & D::OUTPUT));
@@ -730,6 +733,17 @@
return C2Value();
}));
+ add(ConfigMapper(KEY_AAC_PROFILE, C2_PARAMKEY_PROFILE_LEVEL, "profile")
+ .limitTo(D::AUDIO & D::ENCODER & (D::CONFIG | D::PARAM))
+ .withMapper([mapper](C2Value v) -> C2Value {
+ C2Config::profile_t c2 = PROFILE_UNUSED;
+ int32_t sdk;
+ if (mapper && v.get(&sdk) && mapper->mapProfile(sdk, &c2)) {
+ return c2;
+ }
+ return PROFILE_UNUSED;
+ }));
+
// convert to dBFS and add default
add(ConfigMapper(KEY_AAC_DRC_TARGET_REFERENCE_LEVEL, C2_PARAMKEY_DRC_TARGET_REFERENCE_LEVEL, "value")
.limitTo(D::AUDIO & D::DECODER & (D::CONFIG | D::PARAM | D::READ))
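
The KEY_AAC_PROFILE mapper added above translates an SDK profile integer into a Codec 2.0 profile, falling back to an "unused" sentinel. A self-contained stand-in: the C2-side enum values are invented for the example, while the SDK values 2/5 correspond to the usual AACObjectLC/AACObjectHE constants.

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <unordered_map>

    enum class C2Profile : uint32_t { UNUSED = 0, AAC_LC = 0x1, AAC_HE = 0x2 };

    std::function<C2Profile(int32_t)> makeProfileMapper() {
        return [](int32_t sdkProfile) -> C2Profile {
            static const std::unordered_map<int32_t, C2Profile> kMap = {
                {2 /* AACObjectLC */, C2Profile::AAC_LC},
                {5 /* AACObjectHE */, C2Profile::AAC_HE},
            };
            auto it = kMap.find(sdkProfile);
            return it == kMap.end() ? C2Profile::UNUSED : it->second;
        };
    }

    int main() {
        auto mapper = makeProfileMapper();
        std::cout << static_cast<uint32_t>(mapper(2)) << " "
                  << static_cast<uint32_t>(mapper(42)) << "\n";  // 1 0
        return 0;
    }
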
@@ -1174,11 +1188,14 @@
bool changed = false;
if (domain & mInputDomain) {
- sp<AMessage> oldFormat = mInputFormat->dup();
+ sp<AMessage> oldFormat = mInputFormat;
+ mInputFormat = mInputFormat->dup(); // trigger format changed
mInputFormat->extend(getFormatForDomain(reflected, mInputDomain));
if (mInputFormat->countEntries() != oldFormat->countEntries()
|| mInputFormat->changesFrom(oldFormat)->countEntries() > 0) {
changed = true;
+ } else {
+ mInputFormat = oldFormat; // no change
}
}
if (domain & mOutputDomain) {
@@ -1319,6 +1336,14 @@
}
}
+ // Remove KEY_AAC_SBR_MODE from SDK message if it is outside supported range
+ // as SDK doesn't have a way to signal default sbr mode based on profile and
+ // requires that the key isn't present in format to signal that
+ int sbrMode;
+ if (msg->findInt32(KEY_AAC_SBR_MODE, &sbrMode) && (sbrMode < 0 || sbrMode > 2)) {
+ msg->removeEntryAt(msg->findEntryByName(KEY_AAC_SBR_MODE));
+ }
+
{ // convert color info
// move default color to color aspect if not read from the component
int32_t tmp;
diff --git a/media/codec2/sfplugin/FrameReassembler.cpp b/media/codec2/sfplugin/FrameReassembler.cpp
index 9cec23f..af054c7 100644
--- a/media/codec2/sfplugin/FrameReassembler.cpp
+++ b/media/codec2/sfplugin/FrameReassembler.cpp
@@ -143,6 +143,7 @@
if (buffer->size() > 0) {
mCurrentOrdinal.timestamp = timeUs;
+ mCurrentOrdinal.customOrdinal = timeUs;
}
size_t frameSizeBytes = mFrameSize.value() * mChannelCount * bytesPerSample();
@@ -219,6 +220,7 @@
++mCurrentOrdinal.frameIndex;
mCurrentOrdinal.timestamp += mFrameSize.value() * 1000000 / mSampleRate;
+ mCurrentOrdinal.customOrdinal = mCurrentOrdinal.timestamp;
mCurrentBlock.reset();
mWriteView.reset();
}
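
Arithmetic behind the ordinal advance above: each completed frame moves the timestamp forward by frameSize samples expressed in microseconds, and the custom ordinal is kept equal to that timestamp. The frame size and sample rate below are example values.

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t frameSize  = 1024;    // samples per frame (e.g. AAC)
        const uint32_t sampleRate = 48000;   // Hz
        int64_t timestampUs = 0;
        for (int frame = 0; frame < 3; ++frame) {
            timestampUs += int64_t{frameSize} * 1000000 / sampleRate;   // +21333 us per frame
            int64_t customOrdinal = timestampUs;
            printf("frame %d: ts=%lld ordinal=%lld\n", frame,
                   (long long)timestampUs, (long long)customOrdinal);
        }
        return 0;
    }
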
diff --git a/media/codec2/sfplugin/PipelineWatcher.cpp b/media/codec2/sfplugin/PipelineWatcher.cpp
index 0ee9056..bc9197c 100644
--- a/media/codec2/sfplugin/PipelineWatcher.cpp
+++ b/media/codec2/sfplugin/PipelineWatcher.cpp
@@ -95,6 +95,7 @@
}
void PipelineWatcher::flush() {
+ ALOGV("flush");
mFramesInPipeline.clear();
}
diff --git a/media/codec2/sfplugin/utils/Android.bp b/media/codec2/sfplugin/utils/Android.bp
index 74e7ef1..2f4d6b1 100644
--- a/media/codec2/sfplugin/utils/Android.bp
+++ b/media/codec2/sfplugin/utils/Android.bp
@@ -33,11 +33,13 @@
"libcodec2_vndk",
"libcutils",
"liblog",
+ "libnativewindow",
"libstagefright_foundation",
"libutils",
],
static_libs: [
+ "libarect",
"libyuv_static",
],
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
index a54af83..0966988 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
@@ -23,6 +23,7 @@
#include <list>
#include <mutex>
+#include <android/hardware_buffer.h>
#include <media/hardware/HardwareAPI.h>
#include <media/stagefright/foundation/AUtils.h>
@@ -136,31 +137,56 @@
int width = view.crop().width;
int height = view.crop().height;
- if ((IsNV12(view) && IsI420(img)) || (IsI420(view) && IsNV12(img))) {
- // Take shortcuts to use libyuv functions between NV12 and I420 conversion.
- if (IsNV12(view) && IsI420(img)) {
+ if (IsNV12(view)) {
+ if (IsNV12(img)) {
+ libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height / 2);
+ return OK;
+ } else if (IsNV21(img)) {
+ if (!libyuv::NV21ToNV12(src_y, src_stride_y, src_u, src_stride_u,
+ dst_y, dst_stride_y, dst_v, dst_stride_v, width, height)) {
+ return OK;
+ }
+ } else if (IsI420(img)) {
if (!libyuv::NV12ToI420(src_y, src_stride_y, src_u, src_stride_u, dst_y, dst_stride_y,
dst_u, dst_stride_u, dst_v, dst_stride_v, width, height)) {
return OK;
}
- } else {
+ }
+ } else if (IsNV21(view)) {
+ if (IsNV12(img)) {
+ if (!libyuv::NV21ToNV12(src_y, src_stride_y, src_v, src_stride_v,
+ dst_y, dst_stride_y, dst_u, dst_stride_u, width, height)) {
+ return OK;
+ }
+ } else if (IsNV21(img)) {
+ libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width, height / 2);
+ return OK;
+ } else if (IsI420(img)) {
+ if (!libyuv::NV21ToI420(src_y, src_stride_y, src_v, src_stride_v, dst_y, dst_stride_y,
+ dst_u, dst_stride_u, dst_v, dst_stride_v, width, height)) {
+ return OK;
+ }
+ }
+ } else if (IsI420(view)) {
+ if (IsNV12(img)) {
if (!libyuv::I420ToNV12(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
dst_y, dst_stride_y, dst_u, dst_stride_u, width, height)) {
return OK;
}
+ } else if (IsNV21(img)) {
+ if (!libyuv::I420ToNV21(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
+ dst_y, dst_stride_y, dst_v, dst_stride_v, width, height)) {
+ return OK;
+ }
+ } else if (IsI420(img)) {
+ libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width / 2, height / 2);
+ libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width / 2, height / 2);
+ return OK;
}
}
- if (IsNV12(view) && IsNV12(img)) {
- libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
- libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height / 2);
- return OK;
- }
- if (IsI420(view) && IsI420(img)) {
- libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
- libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width / 2, height / 2);
- libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width / 2, height / 2);
- return OK;
- }
return _ImageCopy<true>(view, img, imgBase);
}
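
The restructured copy path above amounts to a dispatch on the (source, destination) layout pair; a compact standalone model follows, where the string results name the libyuv routines used in the real code.

    #include <iostream>

    enum class Yuv420Layout { NV12, NV21, I420 };

    const char* pickPath(Yuv420Layout src, Yuv420Layout dst) {
        if (src == dst) return "CopyPlane per plane";
        if (src == Yuv420Layout::NV12 && dst == Yuv420Layout::I420) return "NV12ToI420";
        if (src == Yuv420Layout::I420 && dst == Yuv420Layout::NV12) return "I420ToNV12";
        if (src == Yuv420Layout::NV21 && dst == Yuv420Layout::I420) return "NV21ToI420";
        if (src == Yuv420Layout::I420 && dst == Yuv420Layout::NV21) return "I420ToNV21";
        // NV12 <-> NV21 only swap the interleaved chroma order; NV21ToNV12 covers both
        // directions by swapping which chroma pointer is passed.
        return "NV21ToNV12 (chroma order swap)";
    }

    int main() {
        std::cout << pickPath(Yuv420Layout::NV12, Yuv420Layout::I420) << "\n";
        std::cout << pickPath(Yuv420Layout::NV21, Yuv420Layout::NV12) << "\n";
        return 0;
    }
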
@@ -182,33 +208,56 @@
int32_t dst_stride_v = view.layout().planes[2].rowInc;
int width = view.crop().width;
int height = view.crop().height;
- if ((IsNV12(img) && IsI420(view)) || (IsI420(img) && IsNV12(view))) {
- // Take shortcuts to use libyuv functions between NV12 and I420 conversion.
- if (IsNV12(img) && IsI420(view)) {
+ if (IsNV12(img)) {
+ if (IsNV12(view)) {
+ libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height / 2);
+ return OK;
+ } else if (IsNV21(view)) {
+ if (!libyuv::NV21ToNV12(src_y, src_stride_y, src_u, src_stride_u,
+ dst_y, dst_stride_y, dst_v, dst_stride_v, width, height)) {
+ return OK;
+ }
+ } else if (IsI420(view)) {
if (!libyuv::NV12ToI420(src_y, src_stride_y, src_u, src_stride_u, dst_y, dst_stride_y,
dst_u, dst_stride_u, dst_v, dst_stride_v, width, height)) {
return OK;
}
- } else {
+ }
+ } else if (IsNV21(img)) {
+ if (IsNV12(view)) {
+ if (!libyuv::NV21ToNV12(src_y, src_stride_y, src_v, src_stride_v,
+ dst_y, dst_stride_y, dst_u, dst_stride_u, width, height)) {
+ return OK;
+ }
+ } else if (IsNV21(view)) {
+ libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width, height / 2);
+ return OK;
+ } else if (IsI420(view)) {
+ if (!libyuv::NV21ToI420(src_y, src_stride_y, src_v, src_stride_v, dst_y, dst_stride_y,
+ dst_u, dst_stride_u, dst_v, dst_stride_v, width, height)) {
+ return OK;
+ }
+ }
+ } else if (IsI420(img)) {
+ if (IsNV12(view)) {
if (!libyuv::I420ToNV12(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
dst_y, dst_stride_y, dst_u, dst_stride_u, width, height)) {
return OK;
}
+ } else if (IsNV21(view)) {
+ if (!libyuv::I420ToNV21(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
+ dst_y, dst_stride_y, dst_v, dst_stride_v, width, height)) {
+ return OK;
+ }
+ } else if (IsI420(view)) {
+ libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width / 2, height / 2);
+ libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width / 2, height / 2);
+ return OK;
}
}
- if (IsNV12(img) && IsNV12(view)) {
- // For NV12, copy Y and UV plane
- libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
- libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height / 2);
- return OK;
- }
- if (IsI420(img) && IsI420(view)) {
- // For I420, copy Y, U and V plane.
- libyuv::CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
- libyuv::CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width / 2, height / 2);
- libyuv::CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width / 2, height / 2);
- return OK;
- }
return _ImageCopy<false>(view, img, imgBase);
}
@@ -250,6 +299,20 @@
&& layout.planes[layout.PLANE_V].offset == 1);
}
+bool IsNV21(const C2GraphicView &view) {
+ if (!IsYUV420(view)) {
+ return false;
+ }
+ const C2PlanarLayout &layout = view.layout();
+ return (layout.rootPlanes == 2
+ && layout.planes[layout.PLANE_U].colInc == 2
+ && layout.planes[layout.PLANE_U].rootIx == layout.PLANE_V
+ && layout.planes[layout.PLANE_U].offset == 1
+ && layout.planes[layout.PLANE_V].colInc == 2
+ && layout.planes[layout.PLANE_V].rootIx == layout.PLANE_V
+ && layout.planes[layout.PLANE_V].offset == 0);
+}
+
bool IsI420(const C2GraphicView &view) {
if (!IsYUV420(view)) {
return false;
@@ -283,7 +346,16 @@
}
return (img->mPlane[1].mColInc == 2
&& img->mPlane[2].mColInc == 2
- && (img->mPlane[2].mOffset - img->mPlane[1].mOffset == 1));
+ && (img->mPlane[2].mOffset == img->mPlane[1].mOffset + 1));
+}
+
+bool IsNV21(const MediaImage2 *img) {
+ if (!IsYUV420(img)) {
+ return false;
+ }
+ return (img->mPlane[1].mColInc == 2
+ && img->mPlane[2].mColInc == 2
+ && (img->mPlane[1].mOffset == img->mPlane[2].mOffset + 1));
}
bool IsI420(const MediaImage2 *img) {
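
Self-contained version of the one-byte-offset rule that IsNV12()/IsNV21() apply to MediaImage2 above; the Plane struct is a stand-in, and only the relative offset and column increment matter.

    #include <cstdint>
    #include <cstdio>

    struct Plane { uint32_t offset; int32_t colInc; };

    bool isNV12(const Plane& u, const Plane& v) {
        return u.colInc == 2 && v.colInc == 2 && v.offset == u.offset + 1;
    }
    bool isNV21(const Plane& u, const Plane& v) {
        return u.colInc == 2 && v.colInc == 2 && u.offset == v.offset + 1;
    }

    int main() {
        Plane u{0, 2}, v{1, 2};                       // U first, then V -> NV12
        printf("NV12=%d NV21=%d\n", isNV12(u, v), isNV21(u, v));
        return 0;
    }
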
@@ -295,6 +367,76 @@
&& img->mPlane[2].mOffset > img->mPlane[1].mOffset);
}
+FlexLayout GetYuv420FlexibleLayout() {
+ static FlexLayout sLayout = []{
+ AHardwareBuffer_Desc desc = {
+ 16, // width
+ 16, // height
+ 1, // layers
+ AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420,
+ AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
+ 0, // stride
+ 0, // rfu0
+ 0, // rfu1
+ };
+ AHardwareBuffer *buffer = nullptr;
+ int ret = AHardwareBuffer_allocate(&desc, &buffer);
+ if (ret != 0) {
+ return FLEX_LAYOUT_UNKNOWN;
+ }
+ class AutoCloser {
+ public:
+ AutoCloser(AHardwareBuffer *buffer) : mBuffer(buffer), mLocked(false) {}
+ ~AutoCloser() {
+ if (mLocked) {
+ AHardwareBuffer_unlock(mBuffer, nullptr);
+ }
+ AHardwareBuffer_release(mBuffer);
+ }
+
+ void setLocked() { mLocked = true; }
+
+ private:
+ AHardwareBuffer *mBuffer;
+ bool mLocked;
+ } autoCloser(buffer);
+ AHardwareBuffer_Planes planes;
+ ret = AHardwareBuffer_lockPlanes(
+ buffer,
+ AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
+ -1, // fence
+ nullptr, // rect
+ &planes);
+ if (ret != 0) {
+ AHardwareBuffer_release(buffer);
+ return FLEX_LAYOUT_UNKNOWN;
+ }
+ autoCloser.setLocked();
+ if (planes.planeCount != 3) {
+ return FLEX_LAYOUT_UNKNOWN;
+ }
+ if (planes.planes[0].pixelStride != 1) {
+ return FLEX_LAYOUT_UNKNOWN;
+ }
+ if (planes.planes[1].pixelStride == 1 && planes.planes[2].pixelStride == 1) {
+ return FLEX_LAYOUT_PLANAR;
+ }
+ if (planes.planes[1].pixelStride == 2 && planes.planes[2].pixelStride == 2) {
+ ssize_t uvDist =
+ static_cast<uint8_t *>(planes.planes[2].data) -
+ static_cast<uint8_t *>(planes.planes[1].data);
+ if (uvDist == 1) {
+ return FLEX_LAYOUT_SEMIPLANAR_UV;
+ } else if (uvDist == -1) {
+ return FLEX_LAYOUT_SEMIPLANAR_VU;
+ }
+ return FLEX_LAYOUT_UNKNOWN;
+ }
+ return FLEX_LAYOUT_UNKNOWN;
+ }();
+ return sLayout;
+}
+
MediaImage2 CreateYUV420PlanarMediaImage2(
uint32_t width, uint32_t height, uint32_t stride, uint32_t vstride) {
return MediaImage2 {
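
GetYuv420FlexibleLayout() above classifies the probed AHardwareBuffer by pixel stride and by the distance between the U and V plane pointers; the decision rule in isolation, with stand-in types:

    #include <cstddef>
    #include <iostream>

    enum FlexLayout { FLEX_UNKNOWN, FLEX_PLANAR, FLEX_SEMIPLANAR_UV, FLEX_SEMIPLANAR_VU };

    FlexLayout classify(int uPixelStride, int vPixelStride, std::ptrdiff_t vMinusU) {
        if (uPixelStride == 1 && vPixelStride == 1) return FLEX_PLANAR;
        if (uPixelStride == 2 && vPixelStride == 2) {
            if (vMinusU == 1) return FLEX_SEMIPLANAR_UV;   // NV12-like
            if (vMinusU == -1) return FLEX_SEMIPLANAR_VU;  // NV21-like
        }
        return FLEX_UNKNOWN;
    }

    int main() {
        std::cout << classify(2, 2, 1) << " " << classify(1, 1, 64) << "\n";  // 2 1
        return 0;
    }
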
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.h b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
index afadf00..af29e81 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.h
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
@@ -96,6 +96,11 @@
bool IsNV12(const C2GraphicView &view);
/**
+ * Returns true iff a view has a NV21 layout.
+ */
+bool IsNV21(const C2GraphicView &view);
+
+/**
* Returns true iff a view has a I420 layout.
*/
bool IsI420(const C2GraphicView &view);
@@ -111,10 +116,26 @@
bool IsNV12(const MediaImage2 *img);
/**
+ * Returns true iff a MediaImage2 has a NV21 layout.
+ */
+bool IsNV21(const MediaImage2 *img);
+
+/**
* Returns true iff a MediaImage2 has a I420 layout.
*/
bool IsI420(const MediaImage2 *img);
+enum FlexLayout {
+ FLEX_LAYOUT_UNKNOWN,
+ FLEX_LAYOUT_PLANAR,
+ FLEX_LAYOUT_SEMIPLANAR_UV,
+ FLEX_LAYOUT_SEMIPLANAR_VU,
+};
+/**
+ * Returns layout of YCBCR_420_888 pixel format.
+ */
+FlexLayout GetYuv420FlexibleLayout();
+
/**
* A raw memory block to use for internal buffers.
*
diff --git a/media/codec2/vndk/C2AllocatorGralloc.cpp b/media/codec2/vndk/C2AllocatorGralloc.cpp
index bee6b7f..4ffa3f1 100644
--- a/media/codec2/vndk/C2AllocatorGralloc.cpp
+++ b/media/codec2/vndk/C2AllocatorGralloc.cpp
@@ -42,7 +42,9 @@
* Usage mask that is passed through from gralloc to Codec 2.0 usage.
*/
PASSTHROUGH_USAGE_MASK =
- ~(GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK | GRALLOC_USAGE_PROTECTED)
+ ~static_cast<uint64_t>(GRALLOC_USAGE_SW_READ_MASK |
+ GRALLOC_USAGE_SW_WRITE_MASK |
+ GRALLOC_USAGE_PROTECTED)
};
// verify that passthrough mask is within the platform mask
diff --git a/media/codec2/vndk/C2AllocatorIon.cpp b/media/codec2/vndk/C2AllocatorIon.cpp
index 85623b8..a8528df 100644
--- a/media/codec2/vndk/C2AllocatorIon.cpp
+++ b/media/codec2/vndk/C2AllocatorIon.cpp
@@ -30,10 +30,15 @@
#include <C2ErrnoUtils.h>
#include <C2HandleIonInternal.h>
+#include <android-base/properties.h>
+
namespace android {
namespace {
constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;
+
+ // max padding after ion/dmabuf allocations in bytes
+ constexpr uint32_t MAX_PADDING = 0x8000; // 32KB
}
/* size_t <=> int(lo), int(hi) conversions */
@@ -376,14 +381,34 @@
unsigned heapMask, unsigned flags, C2Allocator::id_t id) {
int bufferFd = -1;
ion_user_handle_t buffer = -1;
- size_t alignedSize = align == 0 ? size : (size + align - 1) & ~(align - 1);
+ // NOTE: read this property directly from the property as this code has to run on
+ // Android Q, but the sysprop was only introduced in Android S.
+ static size_t sPadding =
+ base::GetUintProperty("media.c2.dmabuf.padding", (uint32_t)0, MAX_PADDING);
+ if (sPadding > SIZE_MAX - size) {
+ ALOGD("ion_alloc: size %#zx cannot accommodate padding %#zx", size, sPadding);
+ // use ImplV2 as there is no allocation anyways
+ return new ImplV2(ionFd, size, -1, id, -ENOMEM);
+ }
+
+ size_t allocSize = size + sPadding;
+ if (align) {
+ if (align - 1 > SIZE_MAX - allocSize) {
+ ALOGD("ion_alloc: size %#zx cannot accommodate padding %#zx and alignment %#zx",
+ size, sPadding, align);
+ // use ImplV2 as there is no allocation anyways
+ return new ImplV2(ionFd, size, -1, id, -ENOMEM);
+ }
+ allocSize += align - 1;
+ allocSize &= ~(align - 1);
+ }
int ret;
if (ion_is_legacy(ionFd)) {
- ret = ion_alloc(ionFd, alignedSize, align, heapMask, flags, &buffer);
+ ret = ion_alloc(ionFd, allocSize, align, heapMask, flags, &buffer);
ALOGV("ion_alloc(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
"returned (%d) ; buffer = %d",
- ionFd, alignedSize, align, heapMask, flags, ret, buffer);
+ ionFd, allocSize, align, heapMask, flags, ret, buffer);
if (ret == 0) {
// get buffer fd for native handle constructor
ret = ion_share(ionFd, buffer, &bufferFd);
@@ -392,15 +417,15 @@
buffer = -1;
}
}
- return new Impl(ionFd, alignedSize, bufferFd, buffer, id, ret);
+ return new Impl(ionFd, allocSize, bufferFd, buffer, id, ret);
} else {
- ret = ion_alloc_fd(ionFd, alignedSize, align, heapMask, flags, &bufferFd);
+ ret = ion_alloc_fd(ionFd, allocSize, align, heapMask, flags, &bufferFd);
ALOGV("ion_alloc_fd(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
"returned (%d) ; bufferFd = %d",
- ionFd, alignedSize, align, heapMask, flags, ret, bufferFd);
+ ionFd, allocSize, align, heapMask, flags, ret, bufferFd);
- return new ImplV2(ionFd, alignedSize, bufferFd, id, ret);
+ return new ImplV2(ionFd, allocSize, bufferFd, id, ret);
}
}
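
The padding logic above guards both additions against wrap-around; the same arithmetic as a standalone helper, assuming (as in the allocator) that align is zero or a power of two.

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <optional>

    std::optional<size_t> paddedAlignedSize(size_t size, size_t padding, size_t align) {
        if (padding > SIZE_MAX - size) return std::nullopt;   // size + padding would overflow
        size_t allocSize = size + padding;
        if (align) {
            if (align - 1 > SIZE_MAX - allocSize) return std::nullopt;  // rounding would overflow
            allocSize = (allocSize + align - 1) & ~(align - 1);
        }
        return allocSize;
    }

    int main() {
        std::cout << *paddedAlignedSize(1000, 0x8000, 4096) << "\n";              // 36864
        std::cout << paddedAlignedSize(SIZE_MAX - 16, 32, 0).has_value() << "\n"; // 0
        return 0;
    }
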
diff --git a/media/codec2/vndk/C2DmaBufAllocator.cpp b/media/codec2/vndk/C2DmaBufAllocator.cpp
index 750aa31..6d8552a 100644
--- a/media/codec2/vndk/C2DmaBufAllocator.cpp
+++ b/media/codec2/vndk/C2DmaBufAllocator.cpp
@@ -16,11 +16,13 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "C2DmaBufAllocator"
+
#include <BufferAllocator/BufferAllocator.h>
#include <C2Buffer.h>
#include <C2Debug.h>
#include <C2DmaBufAllocator.h>
#include <C2ErrnoUtils.h>
+
#include <linux/ion.h>
#include <sys/mman.h>
#include <unistd.h> // getpagesize, size_t, close, dup
@@ -28,14 +30,15 @@
#include <list>
-#ifdef __ANDROID_APEX__
#include <android-base/properties.h>
-#endif
namespace android {
namespace {
-constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;
+ constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;
+
+ // max padding after ion/dmabuf allocations in bytes
+ constexpr uint32_t MAX_PADDING = 0x8000; // 32KB
}
/* =========================== BUFFER HANDLE =========================== */
@@ -250,8 +253,11 @@
int ret = 0;
bufferFd = alloc.Alloc(heap_name, size, flags);
- if (bufferFd < 0) ret = bufferFd;
+ if (bufferFd < 0) {
+ ret = bufferFd;
+ }
+ // this may be a non-working handle if bufferFd is negative
mHandle = C2HandleBuf(bufferFd, size);
mId = id;
mInit = c2_status_t(c2_map_errno<ENOMEM, EACCES, EINVAL>(ret));
@@ -360,8 +366,22 @@
return ret;
}
+ // TODO: should we pad before mapping usage?
+
+ // NOTE: read this property directly from the property as this code has to run on
+ // Android Q, but the sysprop was only introduced in Android S.
+ static size_t sPadding =
+ base::GetUintProperty("media.c2.dmabuf.padding", (uint32_t)0, MAX_PADDING);
+ if (sPadding > SIZE_MAX - capacity) {
+ // size would overflow
+ ALOGD("dmabuf_alloc: size #%x cannot accommodate padding #%zx", capacity, sPadding);
+ return C2_NO_MEMORY;
+ }
+
+ size_t allocSize = (size_t)capacity + sPadding;
+ // TODO: should we align allocation size to mBlockSize to reflect the true allocation size?
std::shared_ptr<C2DmaBufAllocation> alloc = std::make_shared<C2DmaBufAllocation>(
- mBufferAllocator, capacity, heap_name, flags, getId());
+ mBufferAllocator, allocSize, heap_name, flags, getId());
ret = alloc->status();
if (ret == C2_OK) {
*allocation = alloc;
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 314a822..b1d72e8 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -2345,7 +2345,7 @@
if (mLastTrack == NULL)
return ERROR_MALFORMED;
- AMediaFormat_setBuffer(mLastTrack->meta,
+ AMediaFormat_setBuffer(mLastTrack->meta,
AMEDIAFORMAT_KEY_ESDS, &buffer[4], chunk_data_size - 4);
if (mPath.size() >= 2
@@ -2427,7 +2427,7 @@
if (mLastTrack == NULL)
return ERROR_MALFORMED;
- AMediaFormat_setBuffer(mLastTrack->meta,
+ AMediaFormat_setBuffer(mLastTrack->meta,
AMEDIAFORMAT_KEY_CSD_AVC, buffer.get(), chunk_data_size);
break;
@@ -2449,7 +2449,7 @@
if (mLastTrack == NULL)
return ERROR_MALFORMED;
- AMediaFormat_setBuffer(mLastTrack->meta,
+ AMediaFormat_setBuffer(mLastTrack->meta,
AMEDIAFORMAT_KEY_CSD_HEVC, buffer.get(), chunk_data_size);
*offset += chunk_size;
@@ -4021,13 +4021,13 @@
// custom genre string
buffer[size] = '\0';
- AMediaFormat_setString(mFileMetaData,
+ AMediaFormat_setString(mFileMetaData,
metadataKey, (const char *)buffer + 8);
}
} else {
buffer[size] = '\0';
- AMediaFormat_setString(mFileMetaData,
+ AMediaFormat_setString(mFileMetaData,
metadataKey, (const char *)buffer + 8);
}
}
@@ -4568,6 +4568,9 @@
if (objectTypeIndication == 0x6B || objectTypeIndication == 0x69) {
// mp3 audio
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
AMediaFormat_setString(mLastTrack->meta,AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_MPEG);
return OK;
}
@@ -4658,6 +4661,10 @@
if (offset >= csd_size || csd[offset] != 0x01) {
return ERROR_MALFORMED;
}
+
+ if (mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
// formerly kKeyVorbisInfo
AMediaFormat_setBuffer(mLastTrack->meta,
AMEDIAFORMAT_KEY_CSD_0, &csd[offset], len1);
@@ -6187,9 +6194,13 @@
if (newBuffer) {
if (mIsPcm) {
// The twos' PCM block reader assumes that all samples has the same size.
-
- uint32_t samplesToRead = mSampleTable->getLastSampleIndexInChunk()
- - mCurrentSampleIndex + 1;
+ uint32_t lastSampleIndexInChunk = mSampleTable->getLastSampleIndexInChunk();
+ if (lastSampleIndexInChunk < mCurrentSampleIndex) {
+ mBuffer->release();
+ mBuffer = nullptr;
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+ uint32_t samplesToRead = lastSampleIndexInChunk - mCurrentSampleIndex + 1;
if (samplesToRead > kMaxPcmFrameSize) {
samplesToRead = kMaxPcmFrameSize;
}
@@ -6198,13 +6209,17 @@
samplesToRead, size, mCurrentSampleIndex,
mSampleTable->getLastSampleIndexInChunk());
- size_t totalSize = samplesToRead * size;
+ size_t totalSize = samplesToRead * size;
+ if (mBuffer->size() < totalSize) {
+ mBuffer->release();
+ mBuffer = nullptr;
+ return AMEDIA_ERROR_UNKNOWN;
+ }
uint8_t* buf = (uint8_t *)mBuffer->data();
ssize_t bytesRead = mDataSource->readAt(offset, buf, totalSize);
if (bytesRead < (ssize_t)totalSize) {
mBuffer->release();
mBuffer = NULL;
-
return AMEDIA_ERROR_IO;
}
@@ -6258,7 +6273,19 @@
if (isSyncSample) {
AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
}
-
+
+ AMediaFormat_setInt64(
+ meta, "sample-file-offset" /*AMEDIAFORMAT_KEY_SAMPLE_FILE_OFFSET*/,
+ offset);
+
+ if (mSampleTable != nullptr &&
+ mCurrentSampleIndex == mSampleTable->getLastSampleIndexInChunk()) {
+ AMediaFormat_setInt64(
+ meta,
+ "last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+ mSampleTable->getLastSampleIndexInChunk());
+ }
+
++mCurrentSampleIndex;
}
}
@@ -6408,6 +6435,17 @@
AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
}
+ AMediaFormat_setInt64(
+ meta, "sample-file-offset" /*AMEDIAFORMAT_KEY_SAMPLE_FILE_OFFSET*/, offset);
+
+ if (mSampleTable != nullptr &&
+ mCurrentSampleIndex == mSampleTable->getLastSampleIndexInChunk()) {
+ AMediaFormat_setInt64(
+ meta,
+ "last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+ mSampleTable->getLastSampleIndexInChunk());
+ }
+
++mCurrentSampleIndex;
*out = mBuffer;
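
The two new guards in the PCM read path above, reduced to a standalone helper: the chunk's last sample index must not be behind the current index, and the resulting byte count must fit the already-allocated buffer. All values below are illustrative.

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <optional>

    std::optional<size_t> pcmBytesToRead(uint32_t lastSampleIndexInChunk,
                                         uint32_t currentSampleIndex,
                                         size_t sampleSize, size_t bufferCapacity,
                                         uint32_t maxFrames) {
        if (lastSampleIndexInChunk < currentSampleIndex) return std::nullopt;  // malformed table
        uint32_t samplesToRead = lastSampleIndexInChunk - currentSampleIndex + 1;
        if (samplesToRead > maxFrames) samplesToRead = maxFrames;
        size_t totalSize = size_t{samplesToRead} * sampleSize;
        if (bufferCapacity < totalSize) return std::nullopt;                   // buffer too small
        return totalSize;
    }

    int main() {
        std::cout << pcmBytesToRead(1023, 0, 2, 65536, 1024).value_or(0) << "\n";  // 2048
        std::cout << pcmBytesToRead(10, 20, 2, 65536, 1024).has_value() << "\n";   // 0
        return 0;
    }
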
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 22cf254..3333925 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -74,8 +74,9 @@
* The nominal range of the data is [-1.0f, 1.0f).
* Values outside that range may be clipped.
*
- * See also 'floatData' at
- * https://developer.android.com/reference/android/media/AudioTrack#write(float[],%20int,%20int,%20int)
+ * See also the float Data in
+ * <a href="/reference/android/media/AudioTrack#write(float[],%20int,%20int,%20int)">
+ * write(float[], int, int, int)</a>.
*/
AAUDIO_FORMAT_PCM_FLOAT,
@@ -196,21 +197,69 @@
};
typedef int32_t aaudio_result_t;
+/**
+ * AAudio Stream states, for details, refer to
+ * <a href="/ndk/guides/audio/aaudio/aaudio#using-streams">Using an Audio Stream</a>
+ */
enum
{
+
+ /**
+ * The stream is created but not initialized yet.
+ */
AAUDIO_STREAM_STATE_UNINITIALIZED = 0,
+ /**
+ * The stream is in an unrecognized state.
+ */
AAUDIO_STREAM_STATE_UNKNOWN,
+
+ /**
+ * The stream is open and ready to use.
+ */
AAUDIO_STREAM_STATE_OPEN,
+ /**
+ * The stream is just starting up.
+ */
AAUDIO_STREAM_STATE_STARTING,
+ /**
+ * The stream has started.
+ */
AAUDIO_STREAM_STATE_STARTED,
+ /**
+ * The stream is pausing.
+ */
AAUDIO_STREAM_STATE_PAUSING,
+ /**
+ * The stream has paused, could be restarted or flushed.
+ */
AAUDIO_STREAM_STATE_PAUSED,
+ /**
+ * The stream is being flushed.
+ */
AAUDIO_STREAM_STATE_FLUSHING,
+ /**
+ * The stream is flushed, ready to be restarted.
+ */
AAUDIO_STREAM_STATE_FLUSHED,
+ /**
+ * The stream is stopping.
+ */
AAUDIO_STREAM_STATE_STOPPING,
+ /**
+ * The stream has been stopped.
+ */
AAUDIO_STREAM_STATE_STOPPED,
+ /**
+ * The stream is closing.
+ */
AAUDIO_STREAM_STATE_CLOSING,
+ /**
+ * The stream has been closed.
+ */
AAUDIO_STREAM_STATE_CLOSED,
+ /**
+ * The stream is disconnected from the audio device.
+ */
AAUDIO_STREAM_STATE_DISCONNECTED
};
typedef int32_t aaudio_stream_state_t;
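
A minimal use of the state machine documented above, assuming a stream that was already opened elsewhere; only public NDK calls are used, and error handling and teardown are omitted.

    #include <aaudio/AAudio.h>
    #include <stdio.h>

    static aaudio_result_t startAndWait(AAudioStream *stream) {
        aaudio_result_t result = AAudioStream_requestStart(stream);      // OPEN -> STARTING
        if (result != AAUDIO_OK) return result;
        aaudio_stream_state_t next = AAUDIO_STREAM_STATE_UNINITIALIZED;
        // Block until the stream leaves STARTING (normally landing in STARTED).
        result = AAudioStream_waitForStateChange(stream, AAUDIO_STREAM_STATE_STARTING,
                                                 &next, 100 * 1000 * 1000 /* 100 ms */);
        printf("stream state: %s\n", AAudio_convertStreamStateToText(next));
        return result;
    }
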
@@ -260,7 +309,8 @@
* This information is used by certain platforms or routing policies
* to make more refined volume or routing decisions.
*
- * Note that these match the equivalent values in {@link android.media.AudioAttributes}
+ * Note that these match the equivalent values in
+ * <a href="/reference/android/media/AudioAttributes">AudioAttributes</a>
* in the Android Java API.
*
* Added in API level 28.
@@ -361,7 +411,8 @@
* an audio book application) this information might be used by the audio framework to
* enforce audio focus.
*
- * Note that these match the equivalent values in {@link android.media.AudioAttributes}
+ * Note that these match the equivalent values in
+ * <a href="/reference/android/media/AudioAttributes">AudioAttributes</a>
* in the Android Java API.
*
* Added in API level 28.
@@ -441,7 +492,8 @@
/**
* Specifying if audio may or may not be captured by other apps or the system.
*
- * Note that these match the equivalent values in {@link android.media.AudioAttributes}
+ * Note that these match the equivalent values in
+ * <a href="/reference/android/media/AudioAttributes">AudioAttributes</a>
* in the Android Java API.
*
* Added in API level 29.
@@ -453,10 +505,11 @@
* For privacy, the following usages can not be recorded: AAUDIO_VOICE_COMMUNICATION*,
* AAUDIO_USAGE_NOTIFICATION*, AAUDIO_USAGE_ASSISTANCE* and {@link #AAUDIO_USAGE_ASSISTANT}.
*
- * On {@link android.os.Build.VERSION_CODES#Q}, this means only {@link #AAUDIO_USAGE_MEDIA}
- * and {@link #AAUDIO_USAGE_GAME} may be captured.
+ * On <a href="/reference/android/os/Build.VERSION_CODES#Q">Build.VERSION_CODES</a>,
+ * this means only {@link #AAUDIO_USAGE_MEDIA} and {@link #AAUDIO_USAGE_GAME} may be captured.
*
- * See {@link android.media.AudioAttributes#ALLOW_CAPTURE_BY_ALL}.
+ * See <a href="/reference/android/media/AudioAttributes.html#ALLOW_CAPTURE_BY_ALL">
+ * ALLOW_CAPTURE_BY_ALL</a>.
*/
AAUDIO_ALLOW_CAPTURE_BY_ALL = 1,
/**
@@ -464,8 +517,9 @@
*
* System apps can capture for many purposes like accessibility, user guidance...
* but have strong restriction. See
- * {@link android.media.AudioAttributes#ALLOW_CAPTURE_BY_SYSTEM} for what the system apps
- * can do with the capture audio.
+ * <a href="/reference/android/media/AudioAttributes.html#ALLOW_CAPTURE_BY_SYSTEM">
+ * ALLOW_CAPTURE_BY_SYSTEM</a>
+ * for what the system apps can do with the captured audio.
*/
AAUDIO_ALLOW_CAPTURE_BY_SYSTEM = 2,
/**
@@ -473,7 +527,8 @@
*
* It is encouraged to use {@link #AAUDIO_ALLOW_CAPTURE_BY_SYSTEM} instead of this value as system apps
* provide significant and useful features for the user (eg. accessibility).
- * See {@link android.media.AudioAttributes#ALLOW_CAPTURE_BY_NONE}.
+ * See <a href="/reference/android/media/AudioAttributes.html#ALLOW_CAPTURE_BY_NONE">
+ * ALLOW_CAPTURE_BY_NONE</a>.
*/
AAUDIO_ALLOW_CAPTURE_BY_NONE = 3,
};
@@ -803,7 +858,9 @@
* The default is {@link #AAUDIO_ALLOW_CAPTURE_BY_ALL}.
*
* Note that an application can also set its global policy, in which case the most restrictive
- * policy is always applied. See {@link android.media.AudioAttributes#setAllowedCapturePolicy(int)}
+ * policy is always applied. See
+ * <a href="/reference/android/media/AudioManager#setAllowedCapturePolicy(int)">
+ * setAllowedCapturePolicy(int)</a>
*
* Available since API level 29.
*
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 5d311fc..f4a40a8 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -20,6 +20,7 @@
#include <algorithm>
#include <audio_utils/primitives.h>
#include <aaudio/AAudio.h>
+#include <media/MediaMetricsItem.h>
#include "client/AudioStreamInternalCapture.h"
#include "utility/AudioClock.h"
@@ -268,7 +269,7 @@
if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
- result = systemStopFromCallback();
+ result = systemStopInternal();
break;
}
}
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index b81e5e4..71bde90 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -19,6 +19,7 @@
#define ATRACE_TAG ATRACE_TAG_AUDIO
+#include <media/MediaMetricsItem.h>
#include <utils/Trace.h>
#include "client/AudioStreamInternalPlay.h"
@@ -301,7 +302,7 @@
}
} else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
- result = systemStopFromCallback();
+ result = systemStopInternal();
break;
}
}
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 53523c5..ef83c8e 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -59,6 +59,10 @@
if (!mMetricsId.empty()) {
android::mediametrics::LogItem(mMetricsId)
.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_ENDAAUDIOSTREAM)
+ .set(AMEDIAMETRICS_PROP_ENCODINGREQUESTED,
+ android::toString(mDeviceFormat).c_str())
+ .set(AMEDIAMETRICS_PROP_PERFORMANCEMODEACTUAL,
+ AudioGlobal_convertPerformanceModeToText(getPerformanceMode()))
.record();
}
@@ -124,7 +128,12 @@
.set(AMEDIAMETRICS_PROP_PERFORMANCEMODE,
AudioGlobal_convertPerformanceModeToText(getPerformanceMode()))
.set(AMEDIAMETRICS_PROP_SHARINGMODE,
- AudioGlobal_convertSharingModeToText(getSharingMode()));
+ AudioGlobal_convertSharingModeToText(getSharingMode()))
+ .set(AMEDIAMETRICS_PROP_BUFFERCAPACITYFRAMES, getBufferCapacity())
+ .set(AMEDIAMETRICS_PROP_BURSTFRAMES, getFramesPerBurst())
+ .set(AMEDIAMETRICS_PROP_DIRECTION,
+ AudioGlobal_convertDirectionToText(getDirection()));
+
if (getDirection() == AAUDIO_DIRECTION_OUTPUT) {
item.set(AMEDIAMETRICS_PROP_PLAYERIID, mPlayerBase->getPlayerIId());
}
@@ -143,13 +152,13 @@
}
aaudio_result_t AudioStream::systemStart() {
- std::lock_guard<std::mutex> lock(mStreamLock);
-
if (collidesWithCallback()) {
ALOGE("%s cannot be called from a callback!", __func__);
return AAUDIO_ERROR_INVALID_STATE;
}
+ std::lock_guard<std::mutex> lock(mStreamLock);
+
switch (getState()) {
// Is this a good time to start?
case AAUDIO_STREAM_STATE_OPEN:
@@ -187,7 +196,6 @@
}
aaudio_result_t AudioStream::systemPause() {
- std::lock_guard<std::mutex> lock(mStreamLock);
if (!isPauseSupported()) {
return AAUDIO_ERROR_UNIMPLEMENTED;
@@ -198,6 +206,7 @@
return AAUDIO_ERROR_INVALID_STATE;
}
+ std::lock_guard<std::mutex> lock(mStreamLock);
switch (getState()) {
// Proceed with pausing.
case AAUDIO_STREAM_STATE_STARTING:
@@ -242,12 +251,12 @@
return AAUDIO_ERROR_UNIMPLEMENTED;
}
- std::lock_guard<std::mutex> lock(mStreamLock);
if (collidesWithCallback()) {
ALOGE("stream cannot be flushed from a callback!");
return AAUDIO_ERROR_INVALID_STATE;
}
+ std::lock_guard<std::mutex> lock(mStreamLock);
aaudio_result_t result = AAudio_isFlushAllowed(getState());
if (result != AAUDIO_OK) {
return result;
@@ -256,7 +265,7 @@
return requestFlush_l();
}
-aaudio_result_t AudioStream::systemStopFromCallback() {
+aaudio_result_t AudioStream::systemStopInternal() {
std::lock_guard<std::mutex> lock(mStreamLock);
aaudio_result_t result = safeStop_l();
if (result == AAUDIO_OK) {
@@ -267,17 +276,12 @@
}
aaudio_result_t AudioStream::systemStopFromApp() {
- std::lock_guard<std::mutex> lock(mStreamLock);
+ // This check can and should be done outside the lock.
if (collidesWithCallback()) {
ALOGE("stream cannot be stopped by calling from a callback!");
return AAUDIO_ERROR_INVALID_STATE;
}
- aaudio_result_t result = safeStop_l();
- if (result == AAUDIO_OK) {
- // We only call this for logging in "dumpsys audio". So ignore return code.
- (void) mPlayerBase->stopWithStatus();
- }
- return result;
+ return systemStopInternal();
}
aaudio_result_t AudioStream::safeStop_l() {
@@ -316,12 +320,12 @@
}
aaudio_result_t AudioStream::safeRelease() {
- // This may get temporarily unlocked in the MMAP release() when joining callback threads.
- std::lock_guard<std::mutex> lock(mStreamLock);
if (collidesWithCallback()) {
ALOGE("%s cannot be called from a callback!", __func__);
return AAUDIO_ERROR_INVALID_STATE;
}
+ // This may get temporarily unlocked in the MMAP release() when joining callback threads.
+ std::lock_guard<std::mutex> lock(mStreamLock);
if (getState() == AAUDIO_STREAM_STATE_CLOSING) { // already released?
return AAUDIO_OK;
}
@@ -329,23 +333,36 @@
}
aaudio_result_t AudioStream::safeReleaseClose() {
- // This get temporarily unlocked in the MMAP release() when joining callback threads.
- std::lock_guard<std::mutex> lock(mStreamLock);
if (collidesWithCallback()) {
ALOGE("%s cannot be called from a callback!", __func__);
return AAUDIO_ERROR_INVALID_STATE;
}
- releaseCloseFinal_l();
- return AAUDIO_OK;
+ return safeReleaseCloseInternal();
}
-aaudio_result_t AudioStream::safeReleaseCloseFromCallback() {
+aaudio_result_t AudioStream::safeReleaseCloseInternal() {
// This get temporarily unlocked in the MMAP release() when joining callback threads.
std::lock_guard<std::mutex> lock(mStreamLock);
releaseCloseFinal_l();
return AAUDIO_OK;
}
+void AudioStream::close_l() {
+ // Releasing the stream will set the state to CLOSING.
+ assert(getState() == AAUDIO_STREAM_STATE_CLOSING);
+ // setState() prevents a transition from CLOSING to any state other than CLOSED.
+ // State is checked by destructor.
+ setState(AAUDIO_STREAM_STATE_CLOSED);
+
+ if (!mMetricsId.empty()) {
+ android::mediametrics::LogItem(mMetricsId)
+ .set(AMEDIAMETRICS_PROP_FRAMESTRANSFERRED,
+ getDirection() == AAUDIO_DIRECTION_INPUT ? getFramesWritten()
+ : getFramesRead())
+ .record();
+ }
+}
+
void AudioStream::setState(aaudio_stream_state_t state) {
ALOGD("%s(s#%d) from %d to %d", __func__, getId(), mState, state);
if (state == mState) {
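The reordering above -- checking collidesWithCallback() before taking mStreamLock -- is the core deadlock fix in AudioStream.cpp: a call made from the data-callback thread now fails fast instead of blocking on a lock that close() or stop() may already hold. A minimal sketch of that ordering, using a hypothetical StreamLike class rather than the real AAudio implementation:

// Sketch only -- hypothetical names, not the AAudio classes.
#include <mutex>

class StreamLike {
public:
    int systemStop() {
        // 1) Reject calls made from the data-callback thread before touching the lock,
        //    so a callback cannot block on a lock the framework may already hold.
        if (collidesWithCallback()) {
            return -1;  // analogous to AAUDIO_ERROR_INVALID_STATE
        }
        // 2) Only now serialize the actual state change.
        std::lock_guard<std::mutex> lock(mLock);
        mStarted = false;
        return 0;
    }

private:
    bool collidesWithCallback() const { return false; }  // placeholder for the real check
    std::mutex mLock;
    bool mStarted = true;
};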
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 333e665..3930964 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -146,13 +146,7 @@
* Free any resources not already freed by release_l().
* Assume release_l() already called.
*/
- virtual void close_l() REQUIRES(mStreamLock) {
- // Releasing the stream will set the state to CLOSING.
- assert(getState() == AAUDIO_STREAM_STATE_CLOSING);
- // setState() prevents a transition from CLOSING to any state other than CLOSED.
- // State is checked by destructor.
- setState(AAUDIO_STREAM_STATE_CLOSED);
- }
+ virtual void close_l() REQUIRES(mStreamLock);
public:
// This is only used to identify a stream in the logs without
@@ -408,7 +402,7 @@
/**
* This is called internally when an app callback returns AAUDIO_CALLBACK_RESULT_STOP.
*/
- aaudio_result_t systemStopFromCallback();
+ aaudio_result_t systemStopInternal();
/**
* Safely RELEASE a stream after taking mStreamLock and checking
@@ -424,7 +418,7 @@
*/
aaudio_result_t safeReleaseClose();
- aaudio_result_t safeReleaseCloseFromCallback();
+ aaudio_result_t safeReleaseCloseInternal();
protected:
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index fdaa2ab..60eb73a 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -124,7 +124,7 @@
__func__, callbackResult);
}
audioBuffer->size = 0;
- systemStopFromCallback();
+ systemStopInternal();
// Disable the callback just in case the system keeps trying to call us.
mCallbackEnabled.store(false);
}
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index 7733a04..e3ac6ff 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -308,11 +308,19 @@
}
void AudioStreamRecord::close_l() {
+ // The callbacks are normally joined in the AudioRecord destructor.
+ // But if another object has a reference to the AudioRecord then
+ // it will not get deleted here.
+ // So we should join callbacks explicitly before returning.
+ // Unlock around the join to avoid deadlocks if the callback tries to lock.
+ // This can happen if the callback returns AAUDIO_CALLBACK_RESULT_STOP
+ mStreamLock.unlock();
+ mAudioRecord->stopAndJoinCallbacks();
+ mStreamLock.lock();
+
mAudioRecord.clear();
- // Do not close mFixedBlockWriter because a data callback
- // thread might still be running if someone else has a reference
- // to mAudioRecord.
- // It has a unique_ptr to its buffer so it will clean up by itself.
+ // Do not close mFixedBlockReader. It has a unique_ptr to its buffer
+ // so it will clean up by itself.
AudioStream::close_l();
}
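The comment block added to AudioStreamRecord::close_l() (and its twin in AudioStreamTrack below) describes an unlock-around-join pattern. A simplified, self-contained sketch of that idea, using stand-in types rather than the real AudioStreamRecord/AudioRecord classes:

// Sketch only -- simplified stand-ins, not the real classes.
#include <mutex>
#include <thread>

struct CallbackOwner {
    std::thread worker;                  // stands in for the callback thread
    void stopAndJoinCallbacks() {
        if (worker.joinable()) worker.join();   // may block until the callback returns
    }
};

struct Stream {
    std::mutex streamLock;               // analogous to mStreamLock, held by the caller of close_l()
    CallbackOwner owner;

    void close_l() {                     // precondition: streamLock is held by this thread
        // Drop the lock while joining: the callback may re-enter the stream (for example
        // after returning RESULT_STOP) and would otherwise deadlock on streamLock.
        streamLock.unlock();
        owner.stopAndJoinCallbacks();
        streamLock.lock();               // re-acquire before finishing the close
    }
};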
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 142a85c..df97658 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -259,12 +259,18 @@
}
void AudioStreamTrack::close_l() {
- // Stop callbacks before deleting mFixedBlockReader memory.
+ // The callbacks are normally joined in the AudioTrack destructor.
+ // But if another object has a reference to the AudioTrack then
+ // it will not get deleted here.
+ // So we should join callbacks explicitly before returning.
+ // Unlock around the join to avoid deadlocks if the callback tries to lock.
+ // This can happen if the callback returns AAUDIO_CALLBACK_RESULT_STOP
+ mStreamLock.unlock();
+ mAudioTrack->stopAndJoinCallbacks();
+ mStreamLock.lock();
mAudioTrack.clear();
- // Do not close mFixedBlockReader because a data callback
- // thread might still be running if someone else has a reference
- // to mAudioRecord.
- // It has a unique_ptr to its buffer so it will clean up by itself.
+ // Do not close mFixedBlockReader. It has a unique_ptr to its buffer
+ // so it will clean up by itself.
AudioStream::close_l();
}
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 62c9b46..98e9727 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -209,9 +209,9 @@
}
cc_test {
- name: "test_stop_hang",
+ name: "test_callback_race",
defaults: ["libaaudio_tests_defaults"],
- srcs: ["test_stop_hang.cpp"],
+ srcs: ["test_callback_race.cpp"],
shared_libs: [
"libaaudio",
"libbinder",
@@ -250,3 +250,16 @@
"libutils",
],
}
+
+
+cc_test {
+ name: "test_disconnect_race",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_disconnect_race.cpp"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
diff --git a/media/libaaudio/tests/test_callback_race.cpp b/media/libaaudio/tests/test_callback_race.cpp
new file mode 100644
index 0000000..843d5d7
--- /dev/null
+++ b/media/libaaudio/tests/test_callback_race.cpp
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Test whether the callback is joined before the close finishes.
+ *
+ * Start a stream with a callback.
+ * The callback just sleeps for a long time.
+ * While the callback is sleeping, close() the stream from the main thread.
+ * Then check to make sure the callback was joined before the close() returns.
+ *
+ * This can hang if there are deadlocks. So make sure you get a PASSED result.
+ */
+
+#include <atomic>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <gtest/gtest.h>
+
+#include <aaudio/AAudio.h>
+
+// Sleep long enough that the foreground has a chance to call close.
+static constexpr int kCallbackSleepMicros = 600 * 1000;
+
+class AudioEngine {
+public:
+
+ // Check for a crash or late callback if we close without stopping.
+ void checkCloseJoins(aaudio_direction_t direction,
+ aaudio_performance_mode_t perfMode,
+ aaudio_data_callback_result_t callbackResult) {
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ mCallbackResult = callbackResult;
+ startStreamForStall(direction, perfMode);
+ // When the callback starts it will go to sleep.
+ waitForCallbackToStart();
+
+ printf("call AAudioStream_close()\n");
+ ASSERT_FALSE(mCallbackFinished); // Still sleeping?
+ aaudio_result_t result = AAudioStream_close(mStream); // May hang here!
+ ASSERT_TRUE(mCallbackFinished);
+ ASSERT_EQ(AAUDIO_OK, result);
+ printf("AAudioStream_close() returned %d\n", result);
+
+ ASSERT_EQ(AAUDIO_OK, mError.load());
+ // Did calling stop() from callback fail? It should have.
+ ASSERT_NE(AAUDIO_OK, mStopResult.load());
+ }
+
+private:
+ void startStreamForStall(aaudio_direction_t direction,
+ aaudio_performance_mode_t perfMode) {
+ AAudioStreamBuilder* builder = nullptr;
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ result = AAudio_createStreamBuilder(&builder);
+ ASSERT_EQ(AAUDIO_OK, result);
+
+ // Request stream properties.
+ AAudioStreamBuilder_setDirection(builder, direction);
+ AAudioStreamBuilder_setPerformanceMode(builder, perfMode);
+ AAudioStreamBuilder_setDataCallback(builder, s_myDataCallbackProc, this);
+ AAudioStreamBuilder_setErrorCallback(builder, s_myErrorCallbackProc, this);
+
+ // Create an AAudioStream using the Builder.
+ result = AAudioStreamBuilder_openStream(builder, &mStream);
+ AAudioStreamBuilder_delete(builder);
+ ASSERT_EQ(AAUDIO_OK, result);
+
+ // Check to see what kind of stream we actually got.
+ int32_t deviceId = AAudioStream_getDeviceId(mStream);
+ aaudio_performance_mode_t
+ actualPerfMode = AAudioStream_getPerformanceMode(mStream);
+ printf("-------- opened: deviceId = %3d, perfMode = %d\n",
+ deviceId,
+ actualPerfMode);
+
+ // Start stream.
+ result = AAudioStream_requestStart(mStream);
+ ASSERT_EQ(AAUDIO_OK, result);
+ }
+
+ void waitForCallbackToStart() {
+ // Wait for callback to say it has been called.
+ int countDownMillis = 2000;
+ constexpr int countDownPeriodMillis = 50;
+ while (!mCallbackStarted && countDownMillis > 0) {
+ printf("Waiting for callback to start, %d\n", countDownMillis);
+ usleep(countDownPeriodMillis * 1000);
+ countDownMillis -= countDownPeriodMillis;
+ }
+ ASSERT_LT(0, countDownMillis);
+ ASSERT_TRUE(mCallbackStarted);
+ }
+
+// Callback function that fills the audio output buffer.
+ static aaudio_data_callback_result_t s_myDataCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ void * /*audioData */,
+ int32_t /* numFrames */
+ ) {
+ AudioEngine* engine = (AudioEngine*) userData;
+ engine->mCallbackStarted = true;
+ usleep(kCallbackSleepMicros);
+ // it is illegal to call stop() from the callback. It should
+ // return an error and not hang.
+ engine->mStopResult = AAudioStream_requestStop(stream);
+ engine->mCallbackFinished = true;
+ return engine->mCallbackResult;
+ }
+
+ static void s_myErrorCallbackProc(
+ AAudioStream * /* stream */,
+ void *userData,
+ aaudio_result_t error) {
+ AudioEngine *engine = (AudioEngine *)userData;
+ engine->mError = error;
+ }
+
+ AAudioStream* mStream = nullptr;
+
+ std::atomic<aaudio_result_t> mError{AAUDIO_OK}; // written by error callback
+ std::atomic<bool> mCallbackStarted{false}; // written by data callback
+ std::atomic<bool> mCallbackFinished{false}; // written by data callback
+ std::atomic<aaudio_data_callback_result_t> mCallbackResult{AAUDIO_CALLBACK_RESULT_CONTINUE};
+ std::atomic<aaudio_result_t> mStopResult{AAUDIO_OK};
+};
+
+/*********************************************************************/
+// Tell the callback to return AAUDIO_CALLBACK_RESULT_CONTINUE.
+
+TEST(test_close_timing, aaudio_close_joins_input_none) {
+ AudioEngine engine;
+ engine.checkCloseJoins(AAUDIO_DIRECTION_INPUT,
+ AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_CALLBACK_RESULT_CONTINUE);
+}
+
+TEST(test_close_timing, aaudio_close_joins_output_none) {
+ AudioEngine engine;
+ engine.checkCloseJoins(AAUDIO_DIRECTION_OUTPUT,
+ AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_CALLBACK_RESULT_CONTINUE);
+}
+
+TEST(test_close_timing, aaudio_close_joins_input_lowlat) {
+ AudioEngine engine;
+ engine.checkCloseJoins(AAUDIO_DIRECTION_INPUT,
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_CALLBACK_RESULT_CONTINUE);
+}
+
+TEST(test_close_timing, aaudio_close_joins_output_lowlat) {
+ AudioEngine engine;
+ engine.checkCloseJoins(AAUDIO_DIRECTION_OUTPUT,
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_CALLBACK_RESULT_CONTINUE);
+}
+
+/*********************************************************************/
+// Tell the callback to return AAUDIO_CALLBACK_RESULT_STOP.
+
+TEST(test_close_timing, aaudio_close_joins_input_lowlat_stop) {
+ AudioEngine engine;
+ engine.checkCloseJoins(AAUDIO_DIRECTION_INPUT,
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_CALLBACK_RESULT_STOP);
+}
+
+TEST(test_close_timing, aaudio_close_joins_output_lowlat_stop) {
+ AudioEngine engine;
+ engine.checkCloseJoins(AAUDIO_DIRECTION_OUTPUT,
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_CALLBACK_RESULT_STOP);
+}
+
+TEST(test_close_timing, aaudio_close_joins_output_none_stop) {
+ AudioEngine engine;
+ engine.checkCloseJoins(AAUDIO_DIRECTION_OUTPUT,
+ AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_CALLBACK_RESULT_STOP);
+}
+
+TEST(test_close_timing, aaudio_close_joins_input_none_stop) {
+ AudioEngine engine;
+ engine.checkCloseJoins(AAUDIO_DIRECTION_INPUT,
+ AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_CALLBACK_RESULT_STOP);
+}
diff --git a/media/libaaudio/tests/test_disconnect_race.cpp b/media/libaaudio/tests/test_disconnect_race.cpp
new file mode 100644
index 0000000..6dbe165
--- /dev/null
+++ b/media/libaaudio/tests/test_disconnect_race.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Test whether an error callback is joined before the close finishes.
+ *
+ * Start a stream with a callback.
+ * The callback just sleeps for a long time.
+ * While the callback is sleeping, close() the stream from the main thread.
+ * Then check to make sure the callback was joined before the close() returns.
+ *
+ * This can hang if there are deadlocks. So make sure you get a PASSED result.
+ */
+
+#include <atomic>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <aaudio/AAudio.h>
+
+// Sleep long enough that the foreground has a chance to call close.
+static constexpr int kCallbackSleepMillis = 1000;
+static constexpr int kPollSleepMillis = 100;
+
+static int sErrorCount = 0;
+
+#define MY_ASSERT_TRUE(statement) \
+ if (!(statement)) { \
+ printf("ERROR line:%d - " #statement "\n", __LINE__); \
+ sErrorCount++; \
+ return false; \
+ }
+
+#define MY_ASSERT_EQ(aa,bb) MY_ASSERT_TRUE(((aa) == (bb)))
+#define MY_ASSERT_NE(aa,bb) MY_ASSERT_TRUE(((aa) != (bb)))
+
+class AudioEngine {
+public:
+
+ // Check for a crash or late callback if we close without stopping.
+ bool checkCloseJoins(aaudio_direction_t direction,
+ aaudio_performance_mode_t perfMode,
+ bool callStopFromCallback) {
+ mCallStopFromCallback = callStopFromCallback;
+
+ if (!startStreamForStall(direction, perfMode)) return false;
+
+ printf("--------------------------------------------------------\n");
+ printf("%s() - direction = %d, perfMode = %d, callStop = %d\n",
+ __func__, direction, perfMode, callStopFromCallback);
+
+ // When the callback starts it will go to sleep.
+ if (!waitForCallbackToStart()) return false;
+
+ printf("call AAudioStream_close()\n");
+ MY_ASSERT_TRUE(!mCallbackFinished); // Still sleeping?
+ aaudio_result_t result = AAudioStream_close(mStream); // May hang here!
+ if (mCallbackStarted) {
+ MY_ASSERT_TRUE(mCallbackFinished);
+ }
+ MY_ASSERT_EQ(AAUDIO_OK, result);
+ printf("AAudioStream_close() returned %d\n", result);
+
+ MY_ASSERT_EQ(AAUDIO_ERROR_DISCONNECTED, mError.load());
+ if (mCallStopFromCallback) {
+ // Did calling stop() from callback fail? It should have.
+ MY_ASSERT_NE(AAUDIO_OK, mStopResult.load());
+ }
+
+ return true;
+ }
+
+private:
+ bool startStreamForStall(aaudio_direction_t direction,
+ aaudio_performance_mode_t perfMode) {
+ AAudioStreamBuilder* builder = nullptr;
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ result = AAudio_createStreamBuilder(&builder);
+ MY_ASSERT_EQ(AAUDIO_OK, result);
+
+ // Request stream properties.
+ AAudioStreamBuilder_setDirection(builder, direction);
+ AAudioStreamBuilder_setPerformanceMode(builder, perfMode);
+ AAudioStreamBuilder_setDataCallback(builder, s_myDataCallbackProc, this);
+ AAudioStreamBuilder_setErrorCallback(builder, s_myErrorCallbackProc, this);
+
+ // Create an AAudioStream using the Builder.
+ result = AAudioStreamBuilder_openStream(builder, &mStream);
+ AAudioStreamBuilder_delete(builder);
+ MY_ASSERT_EQ(AAUDIO_OK, result);
+
+ // Check to see what kind of stream we actually got.
+ int32_t deviceId = AAudioStream_getDeviceId(mStream);
+ aaudio_performance_mode_t
+ actualPerfMode = AAudioStream_getPerformanceMode(mStream);
+ printf("-------- opened: deviceId = %3d, perfMode = %d\n",
+ deviceId,
+ actualPerfMode);
+
+ // Start stream.
+ result = AAudioStream_requestStart(mStream);
+ MY_ASSERT_EQ(AAUDIO_OK, result);
+
+ return true;
+ }
+
+ bool waitForCallbackToStart() {
+ // Wait for callback to say it has been called.
+ int countDown = 10 * 1000 / kPollSleepMillis;
+ while (!mCallbackStarted && countDown > 0) {
+ if ((countDown % 5) == 0) {
+ printf("===== Please PLUG or UNPLUG headphones! ======= %d\n", countDown);
+ }
+ usleep(kPollSleepMillis * 1000);
+ countDown--;
+ }
+ MY_ASSERT_TRUE(countDown > 0);
+ MY_ASSERT_TRUE(mCallbackStarted);
+ return true;
+ }
+
+// Callback function that fills the audio output buffer.
+ static aaudio_data_callback_result_t s_myDataCallbackProc(
+ AAudioStream * /* stream */,
+ void * /* userData */,
+ void * /* audioData */,
+ int32_t /* numFrames */
+ ) {
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+ }
+
+ static void s_myErrorCallbackProc(
+ AAudioStream * stream,
+ void *userData,
+ aaudio_result_t error) {
+ AudioEngine *engine = (AudioEngine *)userData;
+ engine->mError = error;
+ engine->mCallbackStarted = true;
+ usleep(kCallbackSleepMillis * 1000);
+ // it is illegal to call stop() from the callback. It should
+ // return an error and not hang.
+ if (engine->mCallStopFromCallback) {
+ engine->mStopResult = AAudioStream_requestStop(stream);
+ }
+ engine->mCallbackFinished = true;
+ }
+
+ AAudioStream* mStream = nullptr;
+
+ std::atomic<aaudio_result_t> mError{AAUDIO_OK}; // written by error callback
+ std::atomic<bool> mCallStopFromCallback{false};
+ std::atomic<bool> mCallbackStarted{false}; // written by error callback
+ std::atomic<bool> mCallbackFinished{false}; // written by error callback
+ std::atomic<aaudio_result_t> mStopResult{AAUDIO_OK};
+};
+
+int main(int, char **) {
+ // Parameters to test.
+ static aaudio_direction_t directions[] = {AAUDIO_DIRECTION_OUTPUT,
+ AAUDIO_DIRECTION_INPUT};
+ static aaudio_performance_mode_t perfModes[] =
+ {AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, AAUDIO_PERFORMANCE_MODE_NONE};
+ static bool callStops[] = { false, true };
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ printf("Test Disconnect Race V1.0\n");
+ printf("\n");
+
+ for (auto callStop : callStops) {
+ for (auto direction : directions) {
+ for (auto perfMode : perfModes) {
+ AudioEngine engine;
+ engine.checkCloseJoins(direction, perfMode, callStop);
+ }
+ }
+ }
+
+ printf("Error Count = %d, %s\n", sErrorCount,
+ ((sErrorCount == 0) ? "PASS" : "FAIL"));
+}
diff --git a/media/libaaudio/tests/test_stop_hang.cpp b/media/libaaudio/tests/test_stop_hang.cpp
deleted file mode 100644
index 982ff4a..0000000
--- a/media/libaaudio/tests/test_stop_hang.cpp
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Return stop from the callback
- * and then close the stream immediately.
- */
-
-#include <atomic>
-#include <mutex>
-#include <stdio.h>
-#include <thread>
-#include <unistd.h>
-
-#include <aaudio/AAudio.h>
-
-#define DURATION_SECONDS 5
-
-struct AudioEngine {
- AAudioStreamBuilder *builder = nullptr;
- AAudioStream *stream = nullptr;
- std::thread *thread = nullptr;
-
- std::atomic<bool> started{false};
- std::mutex doneLock; // Use a mutex so we can sleep on it while join()ing.
- std::atomic<bool> done{false};
-
- aaudio_result_t join() {
- aaudio_result_t result = AAUDIO_ERROR_INVALID_STATE;
- if (stream != nullptr) {
- while (true) {
- {
- // Will block if the thread is running.
- // This mutex is used to close() immediately after the callback returns
- // and before the requestStop_l() is called.
- std::lock_guard<std::mutex> lock(doneLock);
- if (done) break;
- }
- printf("join() got mutex but stream not done!");
- usleep(10 * 1000); // sleep then check again
- }
- result = AAudioStream_close(stream);
- stream = nullptr;
- }
- return result;
- }
-};
-
-// Callback function that fills the audio output buffer.
-static aaudio_data_callback_result_t s_myDataCallbackProc(
- AAudioStream *stream,
- void *userData,
- void *audioData,
- int32_t numFrames
-) {
- (void) stream;
- (void) audioData;
- (void) numFrames;
- AudioEngine *engine = (struct AudioEngine *)userData;
- std::lock_guard<std::mutex> lock(engine->doneLock);
- engine->started = true;
- usleep(DURATION_SECONDS * 1000 * 1000); // Mimic SynthMark procedure.
- engine->done = true;
- return AAUDIO_CALLBACK_RESULT_STOP;
-}
-
-static void s_myErrorCallbackProc(
- AAudioStream *stream __unused,
- void *userData __unused,
- aaudio_result_t error) {
- printf("%s() - error = %d\n", __func__, error);
-}
-
-static aaudio_result_t s_OpenAudioStream(struct AudioEngine *engine) {
- // Use an AAudioStreamBuilder to contain requested parameters.
- aaudio_result_t result = AAudio_createStreamBuilder(&engine->builder);
- if (result != AAUDIO_OK) {
- printf("AAudio_createStreamBuilder returned %s",
- AAudio_convertResultToText(result));
- return result;
- }
-
- // Request stream properties.
- AAudioStreamBuilder_setPerformanceMode(engine->builder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
- AAudioStreamBuilder_setDataCallback(engine->builder, s_myDataCallbackProc, engine);
- AAudioStreamBuilder_setErrorCallback(engine->builder, s_myErrorCallbackProc, engine);
-
- // Create an AAudioStream using the Builder.
- result = AAudioStreamBuilder_openStream(engine->builder, &engine->stream);
- if (result != AAUDIO_OK) {
- printf("AAudioStreamBuilder_openStream returned %s",
- AAudio_convertResultToText(result));
- return result;
- }
-
- return result;
-}
-
-int main(int argc, char **argv) {
- (void) argc;
- (void) argv;
- struct AudioEngine engine;
- aaudio_result_t result = AAUDIO_OK;
- int errorCount = 0;
-
- // Make printf print immediately so that debug info is not stuck
- // in a buffer if we hang or crash.
- setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
-
- printf("Test Return Stop Hang V1.0\n");
-
- result = s_OpenAudioStream(&engine);
- if (result != AAUDIO_OK) {
- printf("s_OpenAudioStream returned %s\n",
- AAudio_convertResultToText(result));
- errorCount++;
- }
-
- // Check to see what kind of stream we actually got.
- int32_t deviceId = AAudioStream_getDeviceId(engine.stream);
- aaudio_performance_mode_t actualPerfMode = AAudioStream_getPerformanceMode(engine.stream);
- printf("-------- opened: deviceId = %3d, perfMode = %d\n", deviceId, actualPerfMode);
-
- // Start stream.
- result = AAudioStream_requestStart(engine.stream);
- printf("AAudioStream_requestStart() returned %d >>>>>>>>>>>>>>>>>>>>>>\n", result);
- if (result != AAUDIO_OK) {
- errorCount++;
- } else {
- int counter = 0;
- while (!engine.started) {
- printf("Waiting for stream to start, %d\n", counter++);
- usleep(5 * 1000);
- }
- printf("You should see more messages %d seconds after this. If not then the test failed!\n",
- DURATION_SECONDS);
- result = engine.join(); // This might hang!
- AAudioStreamBuilder_delete(engine.builder);
- engine.builder = nullptr;
- }
-
- printf("aaudio result = %d = %s\n", result, AAudio_convertResultToText(result));
- printf("test %s\n", errorCount ? "FAILED" : "PASSED");
-
- return errorCount ? EXIT_FAILURE : EXIT_SUCCESS;
-}
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 64a335a..19d68a0 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -348,6 +348,7 @@
"aidl/android/media/AudioUniqueIdUse.aidl",
"aidl/android/media/AudioUsage.aidl",
"aidl/android/media/AudioUuid.aidl",
+ "aidl/android/media/AudioVibratorInfo.aidl",
"aidl/android/media/EffectDescriptor.aidl",
"aidl/android/media/ExtraAudioDescriptor.aidl",
],
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index e15ef3d..90f6f41 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -181,21 +181,9 @@
.set(AMEDIAMETRICS_PROP_STATUS, (int32_t)mStatus)
.record();
+ stopAndJoinCallbacks(); // checks mStatus
+
if (mStatus == NO_ERROR) {
- // Make sure that callback function exits in the case where
- // it is looping on buffer empty condition in obtainBuffer().
- // Otherwise the callback thread will never exit.
- stop();
- if (mAudioRecordThread != 0) {
- mProxy->interrupt();
- mAudioRecordThread->requestExit(); // see comment in AudioRecord.h
- mAudioRecordThread->requestExitAndWait();
- mAudioRecordThread.clear();
- }
- // No lock here: worst case we remove a NULL callback which will be a nop
- if (mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE) {
- AudioSystem::removeAudioDeviceCallback(this, mInput, mPortId);
- }
IInterface::asBinder(mAudioRecord)->unlinkToDeath(mDeathNotifier, this);
mAudioRecord.clear();
mCblkMemory.clear();
@@ -208,6 +196,27 @@
}
}
+void AudioRecord::stopAndJoinCallbacks() {
+ // Prevent nullptr crash if it did not open properly.
+ if (mStatus != NO_ERROR) return;
+
+ // Make sure that callback function exits in the case where
+ // it is looping on buffer empty condition in obtainBuffer().
+ // Otherwise the callback thread will never exit.
+ stop();
+ if (mAudioRecordThread != 0) {
+ mProxy->interrupt();
+ mAudioRecordThread->requestExit(); // see comment in AudioRecord.h
+ mAudioRecordThread->requestExitAndWait();
+ mAudioRecordThread.clear();
+ }
+ // No lock here: worst case we remove a NULL callback which will be a nop
+ if (mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE) {
+ // This may not stop all of these device callbacks!
+ // TODO: Add some sort of protection.
+ AudioSystem::removeAudioDeviceCallback(this, mInput, mPortId);
+ }
+}
status_t AudioRecord::set(
audio_source_t inputSource,
uint32_t sampleRate,
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index f476b7d..0bc592d 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -2258,6 +2258,15 @@
return NO_ERROR;
}
+status_t AudioSystem::setVibratorInfos(
+ const std::vector<media::AudioVibratorInfo>& vibratorInfos) {
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == nullptr) {
+ return PERMISSION_DENIED;
+ }
+ return af->setVibratorInfos(vibratorInfos);
+}
+
// ---------------------------------------------------------------------------
int AudioSystem::AudioPolicyServiceClient::addAudioPortCallback(
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 6c9e85c..1bc3baa 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -327,21 +327,9 @@
.set(AMEDIAMETRICS_PROP_STATUS, (int32_t)mStatus)
.record();
+ stopAndJoinCallbacks(); // checks mStatus
+
if (mStatus == NO_ERROR) {
- // Make sure that callback function exits in the case where
- // it is looping on buffer full condition in obtainBuffer().
- // Otherwise the callback thread will never exit.
- stop();
- if (mAudioTrackThread != 0) {
- mProxy->interrupt();
- mAudioTrackThread->requestExit(); // see comment in AudioTrack.h
- mAudioTrackThread->requestExitAndWait();
- mAudioTrackThread.clear();
- }
- // No lock here: worst case we remove a NULL callback which will be a nop
- if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
- AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
- }
IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
mAudioTrack.clear();
mCblkMemory.clear();
@@ -355,6 +343,29 @@
}
}
+void AudioTrack::stopAndJoinCallbacks() {
+ // Prevent nullptr crash if it did not open properly.
+ if (mStatus != NO_ERROR) return;
+
+ // Make sure that callback function exits in the case where
+ // it is looping on buffer full condition in obtainBuffer().
+ // Otherwise the callback thread will never exit.
+ stop();
+ if (mAudioTrackThread != 0) { // not thread safe
+ mProxy->interrupt();
+ mAudioTrackThread->requestExit(); // see comment in AudioTrack.h
+ mAudioTrackThread->requestExitAndWait();
+ mAudioTrackThread.clear();
+ }
+ // No lock here: worst case we remove a NULL callback which will be a nop
+ if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
+ // This may not stop all of these device callbacks!
+ // TODO: Add some sort of protection.
+ AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
+ mDeviceCallback.clear();
+ }
+}
+
status_t AudioTrack::set(
audio_stream_type_t streamType,
uint32_t sampleRate,
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 4103630..0feafc5 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -733,6 +733,11 @@
return statusTFromBinderStatus(mDelegate->setAudioHalPids(pidsAidl));
}
+status_t AudioFlingerClientAdapter::setVibratorInfos(
+ const std::vector<media::AudioVibratorInfo>& vibratorInfos) {
+ return statusTFromBinderStatus(mDelegate->setVibratorInfos(vibratorInfos));
+}
+
////////////////////////////////////////////////////////////////////////////////////////////////////
// AudioFlingerServerAdapter
@@ -1174,4 +1179,9 @@
return Status::ok();
}
+Status AudioFlingerServerAdapter::setVibratorInfos(
+ const std::vector<media::AudioVibratorInfo>& vibratorInfos) {
+ return Status::fromStatusT(mDelegate->setVibratorInfos(vibratorInfos));
+}
+
} // namespace android
diff --git a/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl b/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
new file mode 100644
index 0000000..f88fc3c
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * {@hide}
+ * A class for vibrator information. The information will be used in the HapticGenerator effect.
+ */
+parcelable AudioVibratorInfo {
+ int id;
+ float resonantFrequency;
+ float qFactor;
+}
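For context, a hypothetical caller-side sketch of how this parcelable could be filled in and handed to the new AudioSystem::setVibratorInfos() added in this change; the function name and numeric values here are invented for illustration:

// Hypothetical caller-side sketch; values are examples only.
#include <vector>
#include <android/media/AudioVibratorInfo.h>
#include <media/AudioSystem.h>
#include <utils/Errors.h>

static android::status_t publishVibratorInfo() {
    android::media::AudioVibratorInfo info;
    info.id = 0;                         // vibrator id
    info.resonantFrequency = 150.0f;     // Hz, example value
    info.qFactor = 8.0f;                 // example value
    std::vector<android::media::AudioVibratorInfo> infos = {info};
    // Forwards to IAudioFlinger::setVibratorInfos(), which seeds the HapticGenerator defaults.
    return android::AudioSystem::setVibratorInfos(infos);
}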
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
index e63f391..abbced5 100644
--- a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -23,6 +23,7 @@
import android.media.AudioStreamType;
import android.media.AudioUniqueIdUse;
import android.media.AudioUuid;
+import android.media.AudioVibratorInfo;
import android.media.CreateEffectRequest;
import android.media.CreateEffectResponse;
import android.media.CreateRecordRequest;
@@ -202,4 +203,8 @@
MicrophoneInfoData[] getMicrophones();
void setAudioHalPids(in int[] /* pid_t[] */ pids);
+
+ // Set vibrators' information.
+ // The values will be used to initialize HapticGenerator.
+ void setVibratorInfos(in AudioVibratorInfo[] vibratorInfos);
}
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 82a29d4..3467c3a 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -303,6 +303,19 @@
void stop();
bool stopped() const;
+ /* Calls stop() and then waits for all of the callbacks to return.
+ * It is safe to call this if stop() or pause() has already been called.
+ *
+ * This function is called from the destructor. But since AudioRecord
+ * is ref counted, the destructor may be called later than desired.
+ * This can be called explicitly as part of closing an AudioRecord
+ * if you want to be certain that callbacks have completely finished.
+ *
+ * This is not thread safe and should only be called from one thread,
+ * ideally as the AudioRecord is being closed.
+ */
+ void stopAndJoinCallbacks();
+
/* Return the sink sample rate for this record track in Hz.
* If specified as zero in constructor or set(), this will be the source sample rate.
* Unlike AudioTrack, the sample rate is const after initialization, so doesn't need a lock.
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index c63d29f..4c99dbd 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -19,9 +19,10 @@
#include <sys/types.h>
-#include <android/media/permission/Identity.h>
+#include <android/media/AudioVibratorInfo.h>
#include <android/media/BnAudioFlingerClient.h>
#include <android/media/BnAudioPolicyServiceClient.h>
+#include <android/media/permission/Identity.h>
#include <media/AidlConversionUtil.h>
#include <media/AudioDeviceTypeAddr.h>
#include <media/AudioPolicy.h>
@@ -553,6 +554,8 @@
static audio_port_handle_t getDeviceIdForIo(audio_io_handle_t audioIo);
+ static status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos);
+
private:
class AudioFlingerClient: public IBinder::DeathRecipient, public media::BnAudioFlingerClient
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index d167c40..c293343 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -479,6 +479,19 @@
void stop();
bool stopped() const;
+ /* Call stop() and then wait for all of the callbacks to return.
+ * It is safe to call this if stop() or pause() has already been called.
+ *
+ * This function is called from the destructor. But since AudioTrack
+ * is ref counted, the destructor may be called later than desired.
+ * This can be called explicitly as part of closing an AudioTrack
+ * if you want to be certain that callbacks have completely finished.
+ *
+ * This is not thread safe and should only be called from one thread,
+ * ideally as the AudioTrack is being closed.
+ */
+ void stopAndJoinCallbacks();
+
/* Flush a stopped or paused track. All previously buffered data is discarded immediately.
* This has the effect of draining the buffers without mixing or output.
* Flush is intended for streaming mode, for example before switching to non-contiguous content.
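A hedged usage sketch for the new stopAndJoinCallbacks(): an owner that wants deterministic teardown calls it before dropping its strong pointer, since the AudioTrack destructor may run later if other references remain. Track construction and callback setup are omitted:

// Usage sketch only; assumes `track` was created elsewhere with a data callback.
#include <media/AudioTrack.h>
#include <utils/StrongPointer.h>

void closeTrack(android::sp<android::AudioTrack>& track) {
    if (track != nullptr) {
        // Safe even if stop()/pause() was already called; joins the callback thread now
        // instead of relying on the (possibly deferred) destructor to do it.
        track->stopAndJoinCallbacks();
        track.clear();
    }
}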
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index efd7fed..7f7ca85 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -35,6 +35,7 @@
#include <string>
#include <vector>
+#include <android/media/AudioVibratorInfo.h>
#include <android/media/BnAudioFlingerService.h>
#include <android/media/BpAudioFlingerService.h>
#include <android/media/permission/Identity.h>
@@ -331,6 +332,11 @@
virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
virtual status_t setAudioHalPids(const std::vector<pid_t>& pids) = 0;
+
+ // Set vibrators' information.
+ // The values will be used to initialize HapticGenerator.
+ virtual status_t setVibratorInfos(
+ const std::vector<media::AudioVibratorInfo>& vibratorInfos) = 0;
};
/**
@@ -422,6 +428,7 @@
size_t frameCountHAL(audio_io_handle_t ioHandle) const override;
status_t getMicrophones(std::vector<media::MicrophoneInfo>* microphones) override;
status_t setAudioHalPids(const std::vector<pid_t>& pids) override;
+ status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos) override;
private:
const sp<media::IAudioFlingerService> mDelegate;
@@ -504,6 +511,7 @@
GET_MASTER_BALANCE = media::BnAudioFlingerService::TRANSACTION_getMasterBalance,
SET_EFFECT_SUSPENDED = media::BnAudioFlingerService::TRANSACTION_setEffectSuspended,
SET_AUDIO_HAL_PIDS = media::BnAudioFlingerService::TRANSACTION_setAudioHalPids,
+ SET_VIBRATOR_INFOS = media::BnAudioFlingerService::TRANSACTION_setVibratorInfos,
};
/**
@@ -605,6 +613,7 @@
Status frameCountHAL(int32_t ioHandle, int64_t* _aidl_return) override;
Status getMicrophones(std::vector<media::MicrophoneInfoData>* _aidl_return) override;
Status setAudioHalPids(const std::vector<int32_t>& pids) override;
+ Status setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos) override;
private:
const sp<AudioFlingerServerAdapter::Delegate> mDelegate;
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index 03a0d86..ca4f663 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -354,7 +354,8 @@
return processReturn("releaseAudioPatch", mDevice->releaseAudioPatch(patch));
}
-status_t DeviceHalHidl::getAudioPort(struct audio_port *port) {
+template <typename HalPort>
+status_t DeviceHalHidl::getAudioPortImpl(HalPort *port) {
if (mDevice == 0) return NO_INIT;
AudioPort hidlPort;
HidlUtils::audioPortFromHal(*port, &hidlPort);
@@ -370,31 +371,28 @@
return processReturn("getAudioPort", ret, retval);
}
+status_t DeviceHalHidl::getAudioPort(struct audio_port *port) {
+ return getAudioPortImpl(port);
+}
+
status_t DeviceHalHidl::getAudioPort(struct audio_port_v7 *port) {
- if (mDevice == 0) return NO_INIT;
- status_t status = NO_ERROR;
#if MAJOR_VERSION >= 7
- AudioPort hidlPort;
- HidlUtils::audioPortFromHal(*port, &hidlPort);
- Result retval;
- Return<void> ret = mDevice->getAudioPort(
- hidlPort,
- [&](Result r, const AudioPort& p) {
- retval = r;
- if (retval == Result::OK) {
- HidlUtils::audioPortToHal(p, port);
- }
- });
- status = processReturn("getAudioPort", ret, retval);
+ return getAudioPortImpl(port);
#else
struct audio_port audioPort = {};
- audio_populate_audio_port(port, &audioPort);
- status = getAudioPort(&audioPort);
+ status_t result = NO_ERROR;
+ if (!audio_populate_audio_port(port, &audioPort)) {
+ ALOGE("Failed to populate legacy audio port from audio_port_v7");
+ result = BAD_VALUE;
+ }
+ status_t status = getAudioPort(&audioPort);
if (status == NO_ERROR) {
audio_populate_audio_port_v7(&audioPort, port);
+ } else {
+ result = status;
}
+ return result;
#endif
- return status;
}
status_t DeviceHalHidl::setAudioPortConfig(const struct audio_port_config *config) {
diff --git a/media/libaudiohal/impl/DeviceHalHidl.h b/media/libaudiohal/impl/DeviceHalHidl.h
index abd4ad5..2c847cf 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.h
+++ b/media/libaudiohal/impl/DeviceHalHidl.h
@@ -131,6 +131,8 @@
// The destructor automatically closes the device.
virtual ~DeviceHalHidl();
+
+ template <typename HalPort> status_t getAudioPortImpl(HalPort *port);
};
} // namespace CPP_VERSION
diff --git a/media/libaudiohal/impl/DeviceHalLocal.cpp b/media/libaudiohal/impl/DeviceHalLocal.cpp
index aa9e477..af7dc1a 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.cpp
+++ b/media/libaudiohal/impl/DeviceHalLocal.cpp
@@ -181,6 +181,12 @@
}
status_t DeviceHalLocal::getAudioPort(struct audio_port_v7 *port) {
+#if MAJOR_VERSION >= 7
+ if (version() >= AUDIO_DEVICE_API_VERSION_3_2) {
+ // get_audio_port_v7 is mandatory if the legacy HAL supports this API version.
+ return mDev->get_audio_port_v7(mDev, port);
+ }
+#endif
struct audio_port audioPort = {};
audio_populate_audio_port(port, &audioPort);
status_t status = getAudioPort(&audioPort);
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 2a3e2b6..539a149 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -57,8 +57,7 @@
// Note: This assumes channel mask, format, and sample rate do not change after creation.
audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
if (/* mStreamPowerLog.isUserDebugOrEngBuild() && */
- StreamHalHidl::getAudioProperties(
- &config.sample_rate, &config.channel_mask, &config.format) == NO_ERROR) {
+ StreamHalHidl::getAudioProperties(&config) == NO_ERROR) {
mStreamPowerLog.init(config.sample_rate, config.channel_mask, config.format);
}
}
@@ -69,14 +68,6 @@
hardware::IPCThreadState::self()->flushCommands();
}
-// Note: this method will be removed
-status_t StreamHalHidl::getSampleRate(uint32_t *rate) {
- audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
- status_t status = getAudioProperties(&config.sample_rate, &config.channel_mask, &config.format);
- *rate = config.sample_rate;
- return status;
-}
-
status_t StreamHalHidl::getBufferSize(size_t *size) {
if (!mStream) return NO_INIT;
status_t status = processReturn("getBufferSize", mStream->getBufferSize(), size);
@@ -86,48 +77,28 @@
return status;
}
-// Note: this method will be removed
-status_t StreamHalHidl::getChannelMask(audio_channel_mask_t *mask) {
- audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
- status_t status = getAudioProperties(&config.sample_rate, &config.channel_mask, &config.format);
- *mask = config.channel_mask;
- return status;
-}
-
-// Note: this method will be removed
-status_t StreamHalHidl::getFormat(audio_format_t *format) {
- audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
- status_t status = getAudioProperties(&config.sample_rate, &config.channel_mask, &config.format);
- *format = config.format;
- return status;
-}
-
-status_t StreamHalHidl::getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
+status_t StreamHalHidl::getAudioProperties(audio_config_base_t *configBase) {
+ *configBase = AUDIO_CONFIG_BASE_INITIALIZER;
if (!mStream) return NO_INIT;
#if MAJOR_VERSION <= 6
Return<void> ret = mStream->getAudioProperties(
[&](uint32_t sr, auto m, auto f) {
- *sampleRate = sr;
- *mask = static_cast<audio_channel_mask_t>(m);
- *format = static_cast<audio_format_t>(f);
+ configBase->sample_rate = sr;
+ configBase->channel_mask = static_cast<audio_channel_mask_t>(m);
+ configBase->format = static_cast<audio_format_t>(f);
});
return processReturn("getAudioProperties", ret);
#else
Result retval;
status_t conversionStatus = BAD_VALUE;
- audio_config_base_t halConfig = AUDIO_CONFIG_BASE_INITIALIZER;
Return<void> ret = mStream->getAudioProperties(
[&](Result r, const AudioConfigBase& config) {
retval = r;
if (retval == Result::OK) {
- conversionStatus = HidlUtils::audioConfigBaseToHal(config, &halConfig);
+ conversionStatus = HidlUtils::audioConfigBaseToHal(config, configBase);
}
});
if (status_t status = processReturn("getAudioProperties", ret, retval); status == NO_ERROR) {
- *sampleRate = halConfig.sample_rate;
- *mask = halConfig.channel_mask;
- *format = halConfig.format;
return conversionStatus;
} else {
return status;
diff --git a/media/libaudiohal/impl/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
index c6db6d6..970903b 100644
--- a/media/libaudiohal/impl/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -49,21 +49,14 @@
class StreamHalHidl : public virtual StreamHalInterface, public ConversionHelperHidl
{
public:
- // Return the sampling rate in Hz - eg. 44100.
- virtual status_t getSampleRate(uint32_t *rate);
-
// Return size of input/output buffer in bytes for this stream - eg. 4800.
virtual status_t getBufferSize(size_t *size);
- // Return the channel mask.
- virtual status_t getChannelMask(audio_channel_mask_t *mask);
-
- // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
- virtual status_t getFormat(audio_format_t *format);
-
- // Convenience method.
- virtual status_t getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
+ // Return the base configuration of the stream:
+ // - channel mask;
+ // - format - e.g. AUDIO_FORMAT_PCM_16_BIT;
+ // - sampling rate in Hz - eg. 44100.
+ virtual status_t getAudioProperties(audio_config_base_t *configBase);
// Set audio stream parameters.
virtual status_t setParameters(const String8& kvPairs);
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index e89b288..d0c375e 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -45,31 +45,15 @@
mDevice.clear();
}
-status_t StreamHalLocal::getSampleRate(uint32_t *rate) {
- *rate = mStream->get_sample_rate(mStream);
- return OK;
-}
-
status_t StreamHalLocal::getBufferSize(size_t *size) {
*size = mStream->get_buffer_size(mStream);
return OK;
}
-status_t StreamHalLocal::getChannelMask(audio_channel_mask_t *mask) {
- *mask = mStream->get_channels(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::getFormat(audio_format_t *format) {
- *format = mStream->get_format(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
- *sampleRate = mStream->get_sample_rate(mStream);
- *mask = mStream->get_channels(mStream);
- *format = mStream->get_format(mStream);
+status_t StreamHalLocal::getAudioProperties(audio_config_base_t *configBase) {
+ configBase->sample_rate = mStream->get_sample_rate(mStream);
+ configBase->channel_mask = mStream->get_channels(mStream);
+ configBase->format = mStream->get_format(mStream);
return OK;
}
diff --git a/media/libaudiohal/impl/StreamHalLocal.h b/media/libaudiohal/impl/StreamHalLocal.h
index e228104..b260495 100644
--- a/media/libaudiohal/impl/StreamHalLocal.h
+++ b/media/libaudiohal/impl/StreamHalLocal.h
@@ -28,21 +28,14 @@
class StreamHalLocal : public virtual StreamHalInterface
{
public:
- // Return the sampling rate in Hz - eg. 44100.
- virtual status_t getSampleRate(uint32_t *rate);
-
// Return size of input/output buffer in bytes for this stream - eg. 4800.
virtual status_t getBufferSize(size_t *size);
- // Return the channel mask.
- virtual status_t getChannelMask(audio_channel_mask_t *mask);
-
- // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
- virtual status_t getFormat(audio_format_t *format);
-
- // Convenience method.
- virtual status_t getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
+ // Return the base configuration of the stream:
+ // - channel mask;
+ // - format - e.g. AUDIO_FORMAT_PCM_16_BIT;
+ // - sampling rate in Hz - eg. 44100.
+ virtual status_t getAudioProperties(audio_config_base_t *configBase);
// Set audio stream parameters.
virtual status_t setParameters(const String8& kvPairs);
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index b47f536..2be12fb 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -31,25 +31,27 @@
class StreamHalInterface : public virtual RefBase
{
public:
- // TODO(mnaganov): Remove
- // Return the sampling rate in Hz - eg. 44100.
- virtual status_t getSampleRate(uint32_t *rate) = 0;
-
// Return size of input/output buffer in bytes for this stream - eg. 4800.
virtual status_t getBufferSize(size_t *size) = 0;
- // TODO(mnaganov): Remove
- // Return the channel mask.
- virtual status_t getChannelMask(audio_channel_mask_t *mask) = 0;
+ // Return the base configuration of the stream:
+ // - channel mask;
+ // - format - e.g. AUDIO_FORMAT_PCM_16_BIT;
+ // - sampling rate in Hz - eg. 44100.
+ virtual status_t getAudioProperties(audio_config_base_t *configBase) = 0;
- // TODO(mnaganov): Remove
- // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
- virtual status_t getFormat(audio_format_t *format) = 0;
-
- // TODO(mnaganov): Change to use audio_config_base_t
// Convenience method.
- virtual status_t getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) = 0;
+ inline status_t getAudioProperties(
+ uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
+ audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ const status_t result = getAudioProperties(&config);
+ if (result == NO_ERROR) {
+ if (sampleRate != nullptr) *sampleRate = config.sample_rate;
+ if (mask != nullptr) *mask = config.channel_mask;
+ if (format != nullptr) *format = config.format;
+ }
+ return result;
+ }
// Set audio stream parameters.
virtual status_t setParameters(const String8& kvPairs) = 0;
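A small sketch of a caller using the consolidated getAudioProperties(audio_config_base_t*); the older three-out-pointer form remains available through the inline wrapper above. Stream acquisition is not shown, and the helper name is invented:

// Caller-side sketch only.
#include <cstdio>
#include <media/audiohal/StreamHalInterface.h>
#include <system/audio.h>
#include <utils/Errors.h>
#include <utils/StrongPointer.h>

void printStreamConfig(const android::sp<android::StreamHalInterface>& stream) {
    audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
    if (stream->getAudioProperties(&config) == android::NO_ERROR) {
        printf("rate=%u mask=%#x format=%#x\n",
               config.sample_rate,
               static_cast<unsigned>(config.channel_mask),
               static_cast<unsigned>(config.format));
    }
}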
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
index f2245b1..65a20a7 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
@@ -26,11 +26,16 @@
#include <errno.h>
#include <inttypes.h>
+#include <math.h>
#include <audio_effects/effect_hapticgenerator.h>
#include <audio_utils/format.h>
#include <system/audio.h>
+static constexpr float DEFAULT_RESONANT_FREQUENCY = 150.0f;
+static constexpr float DEFAULT_BSF_ZERO_Q = 8.0f;
+static constexpr float DEFAULT_BSF_POLE_Q = 4.0f;
+
// This is the only symbol that needs to be exported
__attribute__ ((visibility ("default")))
audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
@@ -101,11 +106,11 @@
context->param.audioChannelCount = 0;
context->param.maxHapticIntensity = os::HapticScale::MUTE;
- context->param.resonantFrequency = 150.0f;
+ context->param.resonantFrequency = DEFAULT_RESONANT_FREQUENCY;
context->param.bpfQ = 1.0f;
context->param.slowEnvNormalizationPower = -0.8f;
- context->param.bsfZeroQ = 8.0f;
- context->param.bsfPoleQ = 4.0f;
+ context->param.bsfZeroQ = DEFAULT_BSF_ZERO_Q;
+ context->param.bsfPoleQ = DEFAULT_BSF_POLE_Q;
context->param.distortionCornerFrequency = 300.0f;
context->param.distortionInputGain = 0.3f;
context->param.distortionCubeThreshold = 0.1f;
@@ -173,6 +178,7 @@
addBiquadFilter(processingChain, processorsRecord, lpf);
auto bpf = createBPF(param->resonantFrequency, param->bpfQ, sampleRate, channelCount);
+ processorsRecord.bpf = bpf;
addBiquadFilter(processingChain, processorsRecord, bpf);
float normalizationPower = param->slowEnvNormalizationPower;
@@ -191,6 +197,7 @@
auto bsf = createBSF(
param->resonantFrequency, param->bsfZeroQ, param->bsfPoleQ, sampleRate, channelCount);
+ processorsRecord.bsf = bsf;
addBiquadFilter(processingChain, processorsRecord, bsf);
// The process chain captures the shared pointer of the Distortion in lambda. It will
@@ -279,7 +286,32 @@
}
break;
}
+ case HG_PARAM_VIBRATOR_INFO: {
+ if (value == nullptr || size != 2 * sizeof(float)) {
+ return -EINVAL;
+ }
+ const float resonantFrequency = *(float*) value;
+ const float qFactor = *((float *) value + 1);
+ context->param.resonantFrequency =
+ isnan(resonantFrequency) ? DEFAULT_RESONANT_FREQUENCY : resonantFrequency;
+ context->param.bsfZeroQ = isnan(qFactor) ? DEFAULT_BSF_POLE_Q : qFactor;
+ context->param.bsfPoleQ = context->param.bsfZeroQ / 2.0f;
+ if (context->processorsRecord.bpf != nullptr) {
+ context->processorsRecord.bpf->setCoefficients(
+ bpfCoefs(context->param.resonantFrequency,
+ context->param.bpfQ,
+ context->config.inputCfg.samplingRate));
+ }
+ if (context->processorsRecord.bsf != nullptr) {
+ context->processorsRecord.bsf->setCoefficients(
+ bsfCoefs(context->param.resonantFrequency,
+ context->param.bsfZeroQ,
+ context->param.bsfPoleQ,
+ context->config.inputCfg.samplingRate));
+ }
+ HapticGenerator_Reset(context);
+ } break;
default:
ALOGW("Unknown param: %d", param);
return -EINVAL;
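The HG_PARAM_VIBRATOR_INFO handler above expects a payload of exactly two floats (resonant frequency, then Q factor). A hedged sketch of packing that payload, assuming the effect_param_t layout from system/audio_effect.h; how the command actually reaches the effect (AudioFlinger, AudioEffect, a test harness) is outside this patch:

// Hedged sketch of the parameter payload; delivery to the effect is not shown.
#include <cstdint>
#include <cstring>
#include <vector>
#include <audio_effects/effect_hapticgenerator.h>
#include <system/audio_effect.h>

std::vector<uint8_t> buildVibratorInfoParam(float resonantFrequencyHz, float qFactor) {
    const int32_t param = HG_PARAM_VIBRATOR_INFO;
    const float values[2] = {resonantFrequencyHz, qFactor};      // exactly 2 * sizeof(float)
    std::vector<uint8_t> buffer(sizeof(effect_param_t) + sizeof(param) + sizeof(values));
    auto* p = reinterpret_cast<effect_param_t*>(buffer.data());
    p->psize = sizeof(param);                                    // one int32 parameter id
    p->vsize = sizeof(values);                                   // matches the size check above
    memcpy(p->data, &param, sizeof(param));
    memcpy(p->data + sizeof(param), values, sizeof(values));
    return buffer;
}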
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.h b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
index d2d7afe..96b744a 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.h
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
@@ -69,6 +69,11 @@
std::vector<std::shared_ptr<Ramp>> ramps;
std::vector<std::shared_ptr<SlowEnvelope>> slowEnvs;
std::vector<std::shared_ptr<Distortion>> distortions;
+
+ // Cache band-pass filter and band-stop filter for updating parameters
+ // according to vibrator info
+ std::shared_ptr<HapticBiquadFilter> bpf;
+ std::shared_ptr<HapticBiquadFilter> bsf;
};
// A structure to keep all the context for HapticGenerator.
diff --git a/media/libeffects/hapticgenerator/Processors.cpp b/media/libeffects/hapticgenerator/Processors.cpp
index 79a4e2c..4fe3a75 100644
--- a/media/libeffects/hapticgenerator/Processors.cpp
+++ b/media/libeffects/hapticgenerator/Processors.cpp
@@ -211,9 +211,9 @@
}
BiquadFilterCoefficients bsfCoefs(const float ringingFrequency,
- const float sampleRate,
const float zq,
- const float pq) {
+ const float pq,
+ const float sampleRate) {
BiquadFilterCoefficients coefficient;
const auto [zeroReal, zeroImg] = getComplexPoleZ(ringingFrequency, zq, sampleRate);
float zeroCoeff1 = -2 * zeroReal;
@@ -275,7 +275,7 @@
const float pq,
const float sampleRate,
const size_t channelCount) {
- BiquadFilterCoefficients coefficient = bsfCoefs(ringingFrequency, sampleRate, zq, pq);
+ BiquadFilterCoefficients coefficient = bsfCoefs(ringingFrequency, zq, pq, sampleRate);
return std::make_shared<HapticBiquadFilter>(channelCount, coefficient);
}
diff --git a/media/libeffects/hapticgenerator/Processors.h b/media/libeffects/hapticgenerator/Processors.h
index 452a985..74ca77d 100644
--- a/media/libeffects/hapticgenerator/Processors.h
+++ b/media/libeffects/hapticgenerator/Processors.h
@@ -102,9 +102,9 @@
const float sampleRate);
BiquadFilterCoefficients bsfCoefs(const float ringingFrequency,
- const float sampleRate,
const float zq,
- const float pq);
+ const float pq,
+ const float sampleRate);
std::shared_ptr<HapticBiquadFilter> createLPF(const float cornerFrequency,
const float sampleRate,
diff --git a/media/libeffects/lvm/lib/Android.bp b/media/libeffects/lvm/lib/Android.bp
index 5d75055..7998879 100644
--- a/media/libeffects/lvm/lib/Android.bp
+++ b/media/libeffects/lvm/lib/Android.bp
@@ -63,7 +63,6 @@
"Common/src/DC_2I_D16_TRC_WRA_01_Init.cpp",
"Common/src/Copy_16.cpp",
"Common/src/MonoTo2I_32.cpp",
- "Common/src/LoadConst_32.cpp",
"Common/src/dB_to_Lin32.cpp",
"Common/src/Shift_Sat_v16xv16.cpp",
"Common/src/Shift_Sat_v32xv32.cpp",
@@ -148,7 +147,6 @@
"Reverb/src/LVREV_Process.cpp",
"Reverb/src/LVREV_SetControlParameters.cpp",
"Reverb/src/LVREV_Tables.cpp",
- "Common/src/LoadConst_32.cpp",
"Common/src/From2iToMono_32.cpp",
"Common/src/Mult3s_32x16.cpp",
"Common/src/Copy_16.cpp",
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.cpp b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.cpp
index 9f5f448..12b86f3 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.cpp
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.cpp
@@ -137,9 +137,9 @@
pInstance->pBufferManagement->pScratch = (LVM_FLOAT*)pInstance->pScratch;
- LoadConst_Float(0, /* Clear the input delay buffer */
- (LVM_FLOAT*)&pInstance->pBufferManagement->InDelayBuffer,
- (LVM_INT16)(LVM_MAX_CHANNELS * MIN_INTERNAL_BLOCKSIZE));
+ memset(pInstance->pBufferManagement->InDelayBuffer, 0,
+ LVM_MAX_CHANNELS * MIN_INTERNAL_BLOCKSIZE *
+ sizeof(pInstance->pBufferManagement->InDelayBuffer[0]));
pInstance->pBufferManagement->InDelaySamples =
MIN_INTERNAL_BLOCKSIZE; /* Set the number of delay samples */
pInstance->pBufferManagement->OutDelaySamples = 0; /* No samples in the output buffer */
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp
index 20058a1..4eea04f 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp
@@ -23,6 +23,7 @@
#include <system/audio.h>
#include "LVM_Private.h"
+#include "ScalarArithmetic.h"
#include "VectorArithmetic.h"
#include "LVM_Coeffs.h"
@@ -178,6 +179,9 @@
* Apply the filter
*/
pInstance->pTEBiquad->process(pProcessed, pProcessed, NrFrames);
+ for (auto i = 0; i < NrChannels * NrFrames; i++) {
+ pProcessed[i] = LVM_Clamp(pProcessed[i]);
+ }
}
/*
* Volume balance
diff --git a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
index 18de85b..10f351e 100644
--- a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
+++ b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
@@ -24,8 +24,6 @@
VARIOUS FUNCTIONS
***********************************************************************************/
-void LoadConst_Float(const LVM_FLOAT val, LVM_FLOAT* dst, LVM_INT16 n);
-
void Copy_Float(const LVM_FLOAT* src, LVM_FLOAT* dst, LVM_INT16 n);
void Copy_Float_Mc_Stereo(const LVM_FLOAT* src, LVM_FLOAT* dst, LVM_INT16 NrFrames,
LVM_INT32 NrChannels);
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.cpp b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.cpp
index be19fa0..5a67bda 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.cpp
+++ b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_D16C31_SAT.cpp
@@ -19,6 +19,7 @@
INCLUDE FILES
***********************************************************************************/
+#include <string.h>
#include "LVC_Mixer_Private.h"
#include "VectorArithmetic.h"
#include "ScalarArithmetic.h"
@@ -68,7 +69,7 @@
if (HardMixing) {
if (pInstance->Target == 0)
- LoadConst_Float(0.0, dst, n);
+ memset(dst, 0, n * sizeof(*dst));
else {
if ((pInstance->Target) != 1.0f)
Mult3s_Float(src, (pInstance->Target), dst, n);
@@ -150,7 +151,7 @@
if (HardMixing) {
if (pInstance->Target == 0)
- LoadConst_Float(0.0, dst, NrFrames * NrChannels);
+ memset(dst, 0, NrFrames * NrChannels * sizeof(*dst));
else {
if ((pInstance->Target) != 1.0f)
Mult3s_Float(src, (pInstance->Target), dst, NrFrames * NrChannels);
diff --git a/media/libeffects/lvm/lib/Common/src/LoadConst_32.cpp b/media/libeffects/lvm/lib/Common/src/LoadConst_32.cpp
deleted file mode 100644
index df7a558..0000000
--- a/media/libeffects/lvm/lib/Common/src/LoadConst_32.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2004-2010 NXP Software
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**********************************************************************************
- INCLUDE FILES
-***********************************************************************************/
-
-#include "VectorArithmetic.h"
-
-/**********************************************************************************
- FUNCTION LoadConst_32
-***********************************************************************************/
-void LoadConst_Float(const LVM_FLOAT val, LVM_FLOAT* dst, LVM_INT16 n) {
- LVM_INT16 ii;
-
- for (ii = n; ii != 0; ii--) {
- *dst = val;
- dst++;
- }
-
- return;
-}
-
-/**********************************************************************************/
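
Editor's note: LoadConst_Float() was only ever called with a constant of 0 to zero float buffers, so every call site above is replaced with memset(). A sketch of the equivalence; memset with 0 is a valid zero-fill here because the all-zero bit pattern of an IEEE-754 float is +0.0f:

    #include <cstring>

    // Equivalent of the removed LoadConst_Float(0, dst, n): zero-fill n floats.
    static inline void zeroFloats(float* dst, size_t n) {
        memset(dst, 0, n * sizeof(*dst));
    }
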
diff --git a/media/libeffects/lvm/lib/Common/src/MixSoft_1St_D32C31_WRA.cpp b/media/libeffects/lvm/lib/Common/src/MixSoft_1St_D32C31_WRA.cpp
index 8408962..58a9102 100644
--- a/media/libeffects/lvm/lib/Common/src/MixSoft_1St_D32C31_WRA.cpp
+++ b/media/libeffects/lvm/lib/Common/src/MixSoft_1St_D32C31_WRA.cpp
@@ -19,6 +19,7 @@
INCLUDE FILES
***********************************************************************************/
+#include <string.h>
#include "Mixer_private.h"
#include "VectorArithmetic.h"
@@ -61,7 +62,7 @@
if (HardMixing) {
if (pInstance->Target == 0)
- LoadConst_Float(0, dst, n);
+ memset(dst, 0, n * sizeof(*dst));
else if ((pInstance->Target) == 1.0f) {
if (src != dst) Copy_Float((LVM_FLOAT*)src, (LVM_FLOAT*)dst, (LVM_INT16)(n));
} else
diff --git a/media/libeffects/lvm/lib/Reverb/src/LVREV_ClearAudioBuffers.cpp b/media/libeffects/lvm/lib/Reverb/src/LVREV_ClearAudioBuffers.cpp
index d4b321f..be3505f 100644
--- a/media/libeffects/lvm/lib/Reverb/src/LVREV_ClearAudioBuffers.cpp
+++ b/media/libeffects/lvm/lib/Reverb/src/LVREV_ClearAudioBuffers.cpp
@@ -60,7 +60,8 @@
pLVREV_Private->pRevLPFBiquad->clear();
for (size_t i = 0; i < pLVREV_Private->InstanceParams.NumDelays; i++) {
pLVREV_Private->revLPFBiquad[i]->clear();
- LoadConst_Float(0, pLVREV_Private->pDelay_T[i], LVREV_MAX_T_DELAY[i]);
+ memset(pLVREV_Private->pDelay_T[i], 0, LVREV_MAX_T_DELAY[i] *
+ sizeof(pLVREV_Private->pDelay_T[i][0]));
}
return LVREV_SUCCESS;
}
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.cpp b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.cpp
index c5b6598..de23d07 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.cpp
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.cpp
@@ -81,10 +81,7 @@
pConfig->DelaySize =
(pParams->NrChannels == FCC_1) ? (LVM_INT16)Delay : (LVM_INT16)(FCC_2 * Delay);
pConfig->DelayOffset = 0;
- LoadConst_Float(0, /* Value */
- (LVM_FLOAT*)&pConfig->StereoSamples[0], /* Destination */
- /* Number of words */
- (LVM_UINT16)(sizeof(pConfig->StereoSamples) / sizeof(LVM_FLOAT)));
+ memset(pConfig->StereoSamples, 0, sizeof(pConfig->StereoSamples));
/*
* Setup the filters
*/
diff --git a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
index df7ca5a..7571a24 100755
--- a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
+++ b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
@@ -53,16 +53,16 @@
flags_arr=(
"-csE"
"-eqE"
- "-tE"
- "-csE -tE -eqE"
+ "-tE -trebleLvl:15"
+ "-csE -tE -trebleLvl:15 -eqE"
"-bE -M"
- "-csE -tE"
- "-csE -eqE" "-tE -eqE"
- "-csE -tE -bE -M -eqE"
- "-tE -eqE -vcBal:96 -M"
- "-tE -eqE -vcBal:-96 -M"
- "-tE -eqE -vcBal:0 -M"
- "-tE -eqE -bE -vcBal:30 -M"
+ "-csE -tE -trebleLvl:15"
+ "-csE -eqE" "-tE -trebleLvl:15 -eqE"
+ "-csE -tE -trebleLvl:15 -bE -M -eqE"
+ "-tE -trebleLvl:15 -eqE -vcBal:96 -M"
+ "-tE -trebleLvl:15 -eqE -vcBal:-96 -M"
+ "-tE -trebleLvl:15 -eqE -vcBal:0 -M"
+ "-tE -trebleLvl:15 -eqE -bE -vcBal:30 -M"
)
fs_arr=(
diff --git a/media/libeffects/lvm/tests/lvmtest.cpp b/media/libeffects/lvm/tests/lvmtest.cpp
index e484a1a..e65228c 100644
--- a/media/libeffects/lvm/tests/lvmtest.cpp
+++ b/media/libeffects/lvm/tests/lvmtest.cpp
@@ -79,6 +79,7 @@
int bassEffectLevel = 0;
int eqPresetLevel = 0;
int frameLength = 256;
+ int trebleEffectLevel = 0;
LVM_BE_Mode_en bassEnable = LVM_BE_OFF;
LVM_TE_Mode_en trebleEnable = LVM_TE_OFF;
LVM_EQNB_Mode_en eqEnable = LVM_EQNB_OFF;
@@ -303,10 +304,6 @@
params->PSA_Enable = LVM_PSA_OFF;
params->PSA_PeakDecayRate = LVM_PSA_SPEED_MEDIUM;
- /* TE Control parameters */
- params->TE_OperatingMode = LVM_TE_OFF;
- params->TE_EffectLevel = 0;
-
/* Activate the initial settings */
LvmStatus = LVM_SetControlParameters(pContext->pBundledContext->hInstance, params);
@@ -445,6 +442,7 @@
/* Treble Enhancement parameters */
params->TE_OperatingMode = plvmConfigParams->trebleEnable;
+ params->TE_EffectLevel = plvmConfigParams->trebleEffectLevel;
/* PSA Control parameters */
params->PSA_Enable = LVM_PSA_ON;
@@ -604,6 +602,15 @@
return -1;
}
lvmConfigParams.eqPresetLevel = eqPresetLevel;
+ } else if (!strncmp(argv[i], "-trebleLvl:", 11)) {
+ const int trebleEffectLevel = atoi(argv[i] + 11);
+ if (trebleEffectLevel > LVM_TE_MAX_EFFECTLEVEL ||
+ trebleEffectLevel < LVM_TE_MIN_EFFECTLEVEL) {
+ printf("Error: Unsupported Treble Effect Level : %d\n", trebleEffectLevel);
+ printUsage();
+ return -1;
+ }
+ lvmConfigParams.trebleEffectLevel = trebleEffectLevel;
} else if (!strcmp(argv[i], "-bE")) {
lvmConfigParams.bassEnable = LVM_BE_ON;
} else if (!strcmp(argv[i], "-eqE")) {
diff --git a/media/libmediaformatshaper/CodecProperties.cpp b/media/libmediaformatshaper/CodecProperties.cpp
index d733c57..e6b3c46 100644
--- a/media/libmediaformatshaper/CodecProperties.cpp
+++ b/media/libmediaformatshaper/CodecProperties.cpp
@@ -19,9 +19,14 @@
#include <utils/Log.h>
#include <string>
+#include <stdlib.h>
#include <media/formatshaper/CodecProperties.h>
+
+// we aren't going to mess with shaping points dimensions beyond this
+static const int32_t DIMENSION_LIMIT = 16384;
+
namespace android {
namespace mediaformatshaper {
@@ -63,17 +68,12 @@
ALOGD("setFeatureValue(%s,%d)", key.c_str(), value);
mFeatures.insert({key, value});
- if (!strcmp(key.c_str(), "vq-minimum-quality")) {
- setSupportedMinimumQuality(value);
- } else if (!strcmp(key.c_str(), "vq-supports-qp")) { // key from prototyping
+ if (!strcmp(key.c_str(), "qp-bounds")) { // official key
setSupportsQp(1);
- } else if (!strcmp(key.c_str(), "qp-bounds")) { // official key
+ } else if (!strcmp(key.c_str(), "vq-supports-qp")) { // key from prototyping
setSupportsQp(1);
- } else if (!strcmp(key.c_str(), "vq-target-qpmax")) {
- setTargetQpMax(value);
- } else if (!strcmp(key.c_str(), "vq-target-bppx100")) {
- double bpp = value / 100.0;
- setBpp(bpp);
+ } else if (!strcmp(key.c_str(), "vq-minimum-quality")) {
+ setSupportedMinimumQuality(1);
}
}
@@ -90,6 +90,182 @@
return false;
}
+// Tuning values (which differ from Features)
+// this is where we set up things like target bitrates and QP ranges
+// NB: the tuning values arrive as strings, allowing us to convert them into an appropriate
+// format (int, float, ranges, other combinations)
+//
+void CodecProperties::setTuningValue(std::string key, std::string value) {
+ ALOGD("setTuningValue(%s,%s)", key.c_str(), value.c_str());
+ mTunings.insert({key, value});
+
+ bool legal = false;
+ // NB: old school strtol() because std::stoi() throws exceptions
+ if (!strcmp(key.c_str(), "vq-target-qpmax")) {
+ const char *p = value.c_str();
+ char *q;
+ int32_t iValue = strtol(p, &q, 0);
+ if (q != p) {
+ setTargetQpMax(iValue);
+ legal = true;
+ }
+ } else if (!strcmp(key.c_str(), "vq-target-bpp")) {
+ const char *p = value.c_str();
+ char *q;
+ double bpp = strtod(p, &q);
+ if (q != p) {
+ setBpp(bpp);
+ legal = true;
+ }
+ } else if (!strncmp(key.c_str(), "vq-target-bpp-", strlen("vq-target-bpp-"))) {
+ std::string resolution = key.substr(strlen("vq-target-bpp-"));
+ if (bppPoint(resolution, value)) {
+ legal = true;
+ }
+ } else if (!strcmp(key.c_str(), "vq-target-bppx100")) {
+ // legacy, prototyping
+ const char *p = value.c_str();
+ char *q;
+ int32_t iValue = strtol(p, &q, 0);
+ if (q != p) {
+ double bpp = iValue / 100.0;
+ setBpp(bpp);
+ legal = true;
+ }
+ } else {
+ legal = true;
+ }
+
+ if (!legal) {
+ ALOGW("setTuningValue() unable to apply tuning '%s' with value '%s'",
+ key.c_str(), value.c_str());
+ }
+ return;
+}
+
+bool CodecProperties::getTuningValue(std::string key, std::string &value) {
+ ALOGV("getTuningValue(%s)", key.c_str());
+ auto mapped = mTunings.find(key);
+ if (mapped != mTunings.end()) {
+ value = mapped->second;
+ return true;
+ }
+ return false;
+}
+
+bool CodecProperties::bppPoint(std::string resolution, std::string value) {
+
+ int32_t width = 0;
+ int32_t height = 0;
+ double bpp = -1;
+
+ // resolution is "WxH", "W*H" or a standard name like "720p"
+ if (resolution == "1080p") {
+ width = 1080; height = 1920;
+ } else if (resolution == "720p") {
+ width = 720; height = 1280;
+ } else if (resolution == "540p") {
+ width = 540; height = 960;
+ } else if (resolution == "480p") {
+ width = 480; height = 854;
+ } else {
+ size_t sep = resolution.find('x');
+ if (sep == std::string::npos) {
+ sep = resolution.find('*');
+ }
+ if (sep == std::string::npos) {
+ ALOGW("unable to parse resolution: '%s'", resolution.c_str());
+ return false;
+ }
+ std::string w = resolution.substr(0, sep);
+ std::string h = resolution.substr(sep+1);
+
+ char *q;
+ const char *p = w.c_str();
+ width = strtol(p, &q, 0);
+ if (q == p) {
+ width = -1;
+ }
+ p = h.c_str();
+ height = strtol(p, &q, 0);
+ if (q == p) {
+ height = -1;
+ }
+ if (width <= 0 || height <= 0 || width > DIMENSION_LIMIT || height > DIMENSION_LIMIT) {
+ ALOGW("unparseable: width, height '%s'", resolution.c_str());
+ return false;
+ }
+ }
+
+ const char *p = value.c_str();
+ char *q;
+ bpp = strtod(p, &q);
+ if (q == p) {
+ ALOGW("unparseable bpp '%s'", value.c_str());
+ return false;
+ }
+
+ struct bpp_point *point = (struct bpp_point*) malloc(sizeof(*point));
+ if (point == nullptr) {
+ ALOGW("unable to allocate memory for bpp point");
+ return false;
+ }
+
+ point->pixels = width * height;
+ point->width = width;
+ point->height = height;
+ point->bpp = bpp;
+
+ if (mBppPoints == nullptr) {
+ point->next = nullptr;
+ mBppPoints = point;
+ } else if (point->pixels < mBppPoints->pixels) {
+ // at the front
+ point->next = mBppPoints;
+ mBppPoints = point;
+ } else {
+ struct bpp_point *after = mBppPoints;
+ while (after->next) {
+ if (point->pixels > after->next->pixels) {
+ after = after->next;
+ continue;
+ }
+
+ // insert before after->next
+ point->next = after->next;
+ after->next = point;
+ break;
+ }
+ if (after->next == nullptr) {
+ // hasn't gone in yet
+ point->next = nullptr;
+ after->next = point;
+ }
+ }
+
+ return true;
+}
+
+double CodecProperties::getBpp(int32_t width, int32_t height) {
+ // look in the per-resolution list
+
+ int32_t pixels = width * height;
+
+ if (mBppPoints) {
+ struct bpp_point *point = mBppPoints;
+ while (point && point->pixels < pixels) {
+ point = point->next;
+ }
+ if (point) {
+ ALOGV("getBpp(w=%d,h=%d) returns %f from bpppoint w=%d h=%d",
+ width, height, point->bpp, point->width, point->height);
+ return point->bpp;
+ }
+ }
+
+ ALOGV("defaulting to %f bpp", mBpp);
+ return mBpp;
+}
std::string CodecProperties::getMapping(std::string key, std::string kind) {
ALOGV("getMapping(key %s, kind %s )", key.c_str(), kind.c_str());
diff --git a/media/libmediaformatshaper/CodecSeeding.cpp b/media/libmediaformatshaper/CodecSeeding.cpp
index 629b405..a7fcc66 100644
--- a/media/libmediaformatshaper/CodecSeeding.cpp
+++ b/media/libmediaformatshaper/CodecSeeding.cpp
@@ -26,56 +26,66 @@
namespace mediaformatshaper {
/*
- * a block of pre-loads; things the library seeds into the codecproperties based
+ * a block of pre-loaded tunings for codecs.
+ *
+ * things the library seeds into the codecproperties based
* on the mediaType.
* XXX: parsing from a file is likely better than embedding in code.
*/
typedef struct {
+ bool overrideable;
const char *key;
- int32_t value;
-} preloadFeature_t;
+ const char *value;
+} preloadTuning_t;
typedef struct {
const char *mediaType;
- preloadFeature_t *features;
-} preloadProperties_t;
+ preloadTuning_t *features;
+} preloadTunings_t;
/*
* 240 = 2.4 bits per pixel-per-second == 5mbps@1080, 2.3mbps@720p, which is about where
* we want our initial floor for now.
*/
-static preloadFeature_t featuresAvc[] = {
- {"vq-target-bppx100", 240},
- {nullptr, 0}
+static preloadTuning_t featuresAvc[] = {
+ {true, "vq-target-bpp", "2.45"},
+ {true, "vq-target-bpp-1080p", "2.40"},
+ {true, "vq-target-bpp-540p", "2.60"},
+ {true, "vq-target-bpp-480p", "3.00"},
+ {true, "vq-target-qpmax", "40"},
+ {true, nullptr, 0}
};
-static preloadFeature_t featuresHevc[] = {
- {"vq-target-bppx100", 240},
- {nullptr, 0}
+static preloadTuning_t featuresHevc[] = {
+ {true, "vq-target-bpp", "2.30"},
+ {true, "vq-target-qpmax", "40"}, // nop, since hevc codecs don't declare qp support
+ {true, nullptr, 0}
};
-static preloadFeature_t featuresGenericVideo[] = {
- {"vq-target-bppx100", 240},
- {nullptr, 0}
+static preloadTuning_t featuresGenericVideo[] = {
+ {true, "vq-target-bpp", "2.40"},
+ {true, nullptr, 0}
};
-static preloadProperties_t preloadProperties[] = {
+static preloadTunings_t preloadTunings[] = {
{ "video/avc", featuresAvc},
{ "video/hevc", &featuresHevc[0]},
// wildcard for any video format not already captured
{ "video/*", &featuresGenericVideo[0]},
+
{ nullptr, nullptr}
};
-void CodecProperties::Seed() {
- ALOGV("Seed: for codec %s, mediatype %s", mName.c_str(), mMediaType.c_str());
+void CodecProperties::addMediaDefaults(bool overrideable) {
+ ALOGD("Seed: codec %s, mediatype %s, overrideable %d",
+ mName.c_str(), mMediaType.c_str(), overrideable);
// load me up with initial configuration data
int count = 0;
- for (int i=0;; i++) {
- preloadProperties_t *p = &preloadProperties[i];
+ for (int i = 0; ; i++) {
+ preloadTunings_t *p = &preloadTunings[i];
if (p->mediaType == nullptr) {
break;
}
@@ -100,11 +110,14 @@
// walk through, filling things
if (p->features != nullptr) {
for (int j=0;; j++) {
- preloadFeature_t *q = &p->features[j];
+ preloadTuning_t *q = &p->features[j];
if (q->key == nullptr) {
break;
}
- setFeatureValue(q->key, q->value);
+ if (q->overrideable != overrideable) {
+ continue;
+ }
+ setTuningValue(q->key, q->value);
count++;
}
break;
@@ -113,13 +126,18 @@
ALOGV("loaded %d preset values", count);
}
-// a chance, as we register the codec and accept no further updates, to
-// override any poor configuration that arrived from the device's XML files.
+// a chance, as we create the codec to inject any default behaviors we want.
+// XXX: consider whether we need pre/post or just post. It affects what can be
+// overridden by way of the codec XML
//
+void CodecProperties::Seed() {
+ ALOGV("Seed: for codec %s, mediatype %s", mName.c_str(), mMediaType.c_str());
+ addMediaDefaults(true);
+}
+
void CodecProperties::Finish() {
ALOGV("Finish: for codec %s, mediatype %s", mName.c_str(), mMediaType.c_str());
-
- // currently a no-op
+ addMediaDefaults(false);
}
} // namespace mediaformatshaper
diff --git a/media/libmediaformatshaper/FormatShaper.cpp b/media/libmediaformatshaper/FormatShaper.cpp
index a52edc2..42502e0 100644
--- a/media/libmediaformatshaper/FormatShaper.cpp
+++ b/media/libmediaformatshaper/FormatShaper.cpp
@@ -99,6 +99,23 @@
return 0;
}
+int setTuning(shaperHandle_t shaper, const char *tuning, const char *value) {
+ ALOGV("setTuning: tuning %s value %s", tuning, value);
+ CodecProperties *codec = (CodecProperties*) shaper;
+ if (codec == nullptr) {
+ return -1;
+ }
+ // must not yet be registered
+ if (codec->isRegistered()) {
+ return -1;
+ }
+
+ // save a map of all features
+ codec->setTuningValue(tuning, value);
+
+ return 0;
+}
+
/*
* The routines that manage finding, creating, and registering the shapers.
*/
@@ -176,6 +193,8 @@
.shapeFormat = shapeFormat,
.getMappings = getMappings,
.getReverseMappings = getReverseMappings,
+
+ .setTuning = setTuning,
};
} // namespace mediaformatshaper
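
Editor's note: a sketch of how a client holding the ops table might use the new entry point; setTuning() only succeeds before the shaper is registered, and the value string is parsed later by CodecProperties::setTuningValue(). The ops/shaper handles are assumed to come from the library's usual lookup flow; nothing below adds to the API:

    // Push a QP ceiling onto a not-yet-registered shaper handle.
    int applyQpTuning(const android::mediaformatshaper::FormatShaperOps_t* ops,
                      android::mediaformatshaper::shaperHandle_t shaper) {
        return ops->setTuning(shaper, "vq-target-qpmax", "40");   // 0 on success, -1 otherwise
    }
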
diff --git a/media/libmediaformatshaper/VQApply.cpp b/media/libmediaformatshaper/VQApply.cpp
index 39a5e19..08e23cc 100644
--- a/media/libmediaformatshaper/VQApply.cpp
+++ b/media/libmediaformatshaper/VQApply.cpp
@@ -48,6 +48,15 @@
//
static const int BITRATE_MODE_VBR = 1;
+
+// constants we use within the calculations
+//
+constexpr double BITRATE_LEAVE_UNTOUCHED = 2.0;
+constexpr double BITRATE_QP_UNAVAILABLE = 1.20;
+// 10% didn't work so hot on bonito (with no QP support)
+// 15% is next.. still leaves a few short
+// 20% ? this is on the edge of what I want to do
+
//
// Caller retains ownership of and responsibility for inFormat
//
@@ -69,69 +78,82 @@
}
//
- // apply any and all tools that we have.
+ // consider any and all tools available
// -- qp
// -- minimum bits-per-pixel
//
- if (!codec->supportsQp()) {
- ALOGD("minquality: no qp bounding in codec %s", codec->getName().c_str());
- } else {
- // use a (configurable) QP value to force better quality
- //
+ int64_t bitrateChosen = 0;
+ int32_t qpChosen = INT32_MAX;
+
+ int64_t bitrateConfigured = 0;
+ int32_t bitrateConfiguredTmp = 0;
+ (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_BIT_RATE, &bitrateConfiguredTmp);
+ bitrateConfigured = bitrateConfiguredTmp;
+ bitrateChosen = bitrateConfigured;
+
+ int32_t width = 0;
+ (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_WIDTH, &width);
+ int32_t height = 0;
+ (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_HEIGHT, &height);
+ int64_t pixels = ((int64_t)width) * height;
+ double minimumBpp = codec->getBpp(width, height);
+
+ int64_t bitrateFloor = pixels * minimumBpp;
+ if (bitrateFloor > INT32_MAX) bitrateFloor = INT32_MAX;
+
+ // if we are far enough above the target bpp, leave it alone
+ //
+ ALOGV("bitrate: configured %" PRId64 " floor %" PRId64, bitrateConfigured, bitrateFloor);
+ if (bitrateConfigured >= BITRATE_LEAVE_UNTOUCHED * bitrateFloor) {
+ ALOGV("high enough bitrate: configured %" PRId64 " >= %f * floor %" PRId64,
+ bitrateConfigured, BITRATE_LEAVE_UNTOUCHED, bitrateFloor);
+ return 0;
+ }
+
+ // raise anything below the bitrate floor
+ if (bitrateConfigured < bitrateFloor) {
+ ALOGD("raise bitrate: configured %" PRId64 " to floor %" PRId64,
+ bitrateConfigured, bitrateFloor);
+ bitrateChosen = bitrateFloor;
+ }
+
+ bool qpPresent = hasQp(inFormat);
+
+ // add QP, if not already present
+ if (!qpPresent) {
int32_t qpmax = codec->targetQpMax();
- int32_t qpmaxUser = INT32_MAX;
- if (hasQp(inFormat)) {
- (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_VIDEO_QP_MAX, &qpmaxUser);
- ALOGD("minquality by QP: format already sets QP");
- }
-
- // if the system didn't do one, use what the user provided
- if (qpmax == 0 && qpmaxUser != INT32_MAX) {
- qpmax = qpmaxUser;
- }
- // XXX: if both said something, how do we want to reconcile that
-
- if (qpmax > 0) {
- ALOGD("minquality by QP: inject %s=%d", AMEDIAFORMAT_VIDEO_QP_MAX, qpmax);
- AMediaFormat_setInt32(inFormat, AMEDIAFORMAT_VIDEO_QP_MAX, qpmax);
-
- // force spreading the QP across frame types, since we imposing a value
- qpSpreadMaxPerFrameType(inFormat, info->qpDelta, info->qpMax, /* override */ true);
+ if (qpmax != INT32_MAX) {
+ ALOGV("choosing qp=%d", qpmax);
+ qpChosen = qpmax;
}
}
- double bpp = codec->getBpp();
- if (bpp > 0.0) {
- // if we've decided to use bits-per-pixel (per second) to drive the quality
- //
- // (properly phrased as 'bits per second per pixel' so that it's resolution
- // and framerate agnostic
- //
- // all of these is structured so that a missing value cleanly gets us to a
- // non-faulting value of '0' for the minimum bits-per-pixel.
- //
- int32_t width = 0;
- (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_WIDTH, &width);
- int32_t height = 0;
- (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_HEIGHT, &height);
- int32_t bitrateConfigured = 0;
- (void) AMediaFormat_getInt32(inFormat, AMEDIAFORMAT_KEY_BIT_RATE, &bitrateConfigured);
-
- int64_t pixels = ((int64_t)width) * height;
- int64_t bitrateFloor = pixels * bpp;
-
- if (bitrateFloor > INT32_MAX) bitrateFloor = INT32_MAX;
-
- ALOGD("minquality/bitrate: target %d floor %" PRId64 "(%.3f bpp * (%d w * %d h)",
- bitrateConfigured, bitrateFloor, codec->getBpp(), height, width);
-
- if (bitrateConfigured < bitrateFloor) {
- ALOGD("minquality/target bitrate raised from %d to %" PRId64 " bps",
- bitrateConfigured, bitrateFloor);
- AMediaFormat_setInt32(inFormat, AMEDIAFORMAT_KEY_BIT_RATE, (int32_t)bitrateFloor);
+ // if QP is desired but not supported, compensate with additional bits
+ if (!codec->supportsQp()) {
+ if (qpPresent || qpChosen != INT32_MAX) {
+ ALOGD("minquality: desired QP, but unsupported, boost bitrate %" PRId64 " to %" PRId64,
+ bitrateChosen, (int64_t)(bitrateChosen * BITRATE_QP_UNAVAILABLE));
+ bitrateChosen = bitrateChosen * BITRATE_QP_UNAVAILABLE;
+ qpChosen = INT32_MAX;
}
}
+ // apply our chosen values
+ //
+ if (qpChosen != INT32_MAX) {
+ ALOGD("minquality by QP: inject %s=%d", AMEDIAFORMAT_VIDEO_QP_MAX, qpChosen);
+ AMediaFormat_setInt32(inFormat, AMEDIAFORMAT_VIDEO_QP_MAX, qpChosen);
+
+ // force spreading the QP across frame types, since we are imposing a value
+ qpSpreadMaxPerFrameType(inFormat, info->qpDelta, info->qpMax, /* override */ true);
+ }
+
+ if (bitrateChosen != bitrateConfigured) {
+ ALOGD("minquality/target bitrate raised from %" PRId64 " to %" PRId64 " bps",
+ bitrateConfigured, bitrateChosen);
+ AMediaFormat_setInt32(inFormat, AMEDIAFORMAT_KEY_BIT_RATE, (int32_t)bitrateChosen);
+ }
+
return 0;
}
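
Editor's note: a worked sketch of the new flow, with illustrative numbers only. The floor is pixels times the per-resolution bpp; bitrates at or above 2.0x the floor are left untouched; anything below the floor is raised to it; and when QP bounding is wanted but unsupported, the chosen bitrate is boosted by 20% instead:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const double BITRATE_LEAVE_UNTOUCHED = 2.0;   // same constants as above
        const double BITRATE_QP_UNAVAILABLE = 1.20;

        int64_t pixels = 1920LL * 1080;               // 2,073,600 pixels at 1080p
        double minimumBpp = 2.40;                     // per-resolution bpp point
        int64_t floorBps = pixels * minimumBpp;       // ~4.98 Mbps

        int64_t configured = 3000000;                 // app asked for 3 Mbps
        int64_t chosen = configured;
        if (configured >= BITRATE_LEAVE_UNTOUCHED * floorBps) {
            // >= ~9.95 Mbps: leave the request untouched
        } else if (configured < floorBps) {
            chosen = floorBps;                        // raise to the floor, ~4.98 Mbps
        }

        bool qpWanted = true, qpSupported = false;    // e.g. a codec without QP bounding
        if (qpWanted && !qpSupported) {
            chosen = chosen * BITRATE_QP_UNAVAILABLE; // compensate with bits: ~5.97 Mbps
        }
        printf("floor=%lld chosen=%lld\n", (long long)floorBps, (long long)chosen);
        return 0;
    }
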
diff --git a/media/libmediaformatshaper/include/media/formatshaper/CodecProperties.h b/media/libmediaformatshaper/include/media/formatshaper/CodecProperties.h
index e5cc9cf..ff7051f 100644
--- a/media/libmediaformatshaper/include/media/formatshaper/CodecProperties.h
+++ b/media/libmediaformatshaper/include/media/formatshaper/CodecProperties.h
@@ -21,6 +21,8 @@
#include <mutex>
#include <string>
+#include <inttypes.h>
+
#include <utils/RefBase.h>
namespace android {
@@ -56,6 +58,10 @@
void setFeatureValue(std::string key, int32_t value);
bool getFeatureValue(std::string key, int32_t *valuep);
+ // keep a map of all tunings and their parameters
+ void setTuningValue(std::string key, std::string value);
+ bool getTuningValue(std::string key, std::string &value);
+
// does the codec support the Android S minimum quality rules
void setSupportedMinimumQuality(int vmaf);
int supportedMinimumQuality();
@@ -69,7 +75,7 @@
// This is used to calculate a minimum bitrate for any particular resolution.
// A 1080p (1920*1080 = 2073600 pixels) to be encoded at 5Mbps has a bpp == 2.41
void setBpp(double bpp) { mBpp = bpp;}
- double getBpp() {return mBpp;}
+ double getBpp(int32_t width, int32_t height);
// Does this codec support QP bounding
// The getMapping() methods provide any needed mapping to non-standard keys.
@@ -88,15 +94,31 @@
std::string mMediaType;
int mApi = 0;
int mMinimumQuality = 0;
- int mTargetQpMax = 0;
+ int mTargetQpMax = INT32_MAX;
bool mSupportsQp = false;
double mBpp = 0.0;
+ // allow different target bits-per-pixel based on resolution
+ // similar to codec 'performance points'
+ // uses 'next largest' (by pixel count) point as minimum bpp
+ struct bpp_point {
+ struct bpp_point *next;
+ int32_t pixels;
+ int32_t width, height;
+ double bpp;
+ };
+ struct bpp_point *mBppPoints = nullptr;
+ bool bppPoint(std::string resolution, std::string value);
+
std::mutex mMappingLock;
// XXX figure out why I'm having problems getting compiler to like GUARDED_BY
std::map<std::string, std::string> mMappings /*GUARDED_BY(mMappingLock)*/ ;
std::map<std::string, int32_t> mFeatures /*GUARDED_BY(mMappingLock)*/ ;
+ std::map<std::string, std::string> mTunings /*GUARDED_BY(mMappingLock)*/ ;
+
+ // Seed() and Finish() use this as the underlying implementation
+ void addMediaDefaults(bool overrideable);
bool mIsRegistered = false;
diff --git a/media/libmediaformatshaper/include/media/formatshaper/FormatShaper.h b/media/libmediaformatshaper/include/media/formatshaper/FormatShaper.h
index 8ad81cd..a1747cc 100644
--- a/media/libmediaformatshaper/include/media/formatshaper/FormatShaper.h
+++ b/media/libmediaformatshaper/include/media/formatshaper/FormatShaper.h
@@ -84,6 +84,12 @@
typedef int (*setFeature_t)(shaperHandle_t shaper, const char *feature, int value);
/*
+ * establishes that codec "codecName" encoding for "mediaType" supports the indicated
+ * tuning at the indicated value
+ */
+typedef int (*setTuning_t)(shaperHandle_t shaper, const char *feature, const char * value);
+
+/*
* The expectation is that the client will implement a flow similar to the following when
* setting up an encoding.
*
@@ -118,6 +124,10 @@
shapeFormat_t shapeFormat;
getMappings_t getMappings;
getMappings_t getReverseMappings;
+
+ setTuning_t setTuning;
+
+ // additions happen at the end of the structure
} FormatShaperOps_t;
// versioninf information
diff --git a/media/libmediametrics/include/MediaMetricsConstants.h b/media/libmediametrics/include/MediaMetricsConstants.h
index de4f8d4..383bae8 100644
--- a/media/libmediametrics/include/MediaMetricsConstants.h
+++ b/media/libmediametrics/include/MediaMetricsConstants.h
@@ -160,6 +160,12 @@
#define AMEDIAMETRICS_PROP_VOLUME_LEFT "volume.left" // double (AudioTrack)
#define AMEDIAMETRICS_PROP_VOLUME_RIGHT "volume.right" // double (AudioTrack)
#define AMEDIAMETRICS_PROP_WHERE "where" // string value
+// EncodingRequested is the encoding format requested by the app
+#define AMEDIAMETRICS_PROP_ENCODINGREQUESTED "encodingRequested" // string
+// PerformanceModeActual is the actual selected performance mode, could be "none", "lowLatency" or
+// "powerSaving"
+#define AMEDIAMETRICS_PROP_PERFORMANCEMODEACTUAL "performanceModeActual" // string
+#define AMEDIAMETRICS_PROP_FRAMESTRANSFERRED "framesTransferred" // int64_t, transferred frames
// Timing values: millisecond values are suffixed with MS and the type is double
// nanosecond values are suffixed with NS and the type is int64.
diff --git a/media/libmediaplayerservice/Android.bp b/media/libmediaplayerservice/Android.bp
index d250976..287317d 100644
--- a/media/libmediaplayerservice/Android.bp
+++ b/media/libmediaplayerservice/Android.bp
@@ -38,6 +38,7 @@
"media_permission-aidl-cpp",
"libaudioclient_aidl_conversion",
"libbase",
+ "libactivitymanager_aidl",
"libandroid_net",
"libaudioclient",
"libbinder",
diff --git a/media/libmediatranscoding/TEST_MAPPING b/media/libmediatranscoding/TEST_MAPPING
index f8a9db9..40f7b21 100644
--- a/media/libmediatranscoding/TEST_MAPPING
+++ b/media/libmediatranscoding/TEST_MAPPING
@@ -26,6 +26,9 @@
},
{
"name": "VideoTrackTranscoderTests"
+ },
+ {
+ "name": "CtsMediaTranscodingTestCases"
}
]
}
diff --git a/media/libmediatranscoding/TranscodingClientManager.cpp b/media/libmediatranscoding/TranscodingClientManager.cpp
index 06c5421..6dbcaf9 100644
--- a/media/libmediatranscoding/TranscodingClientManager.cpp
+++ b/media/libmediatranscoding/TranscodingClientManager.cpp
@@ -94,6 +94,12 @@
Status getSessionWithId(int32_t /*in_sessionId*/, TranscodingSessionParcel* /*out_session*/,
bool* /*_aidl_return*/) override;
+ Status addClientUid(int32_t /*in_sessionId*/, int32_t /*in_clientUid*/,
+ bool* /*_aidl_return*/) override;
+
+ Status getClientUids(int32_t /*in_sessionId*/,
+ std::optional<std::vector<int32_t>>* /*_aidl_return*/) override;
+
Status unregister() override;
};
@@ -217,6 +223,63 @@
return Status::ok();
}
+Status TranscodingClientManager::ClientImpl::addClientUid(int32_t in_sessionId,
+ int32_t in_clientUid,
+ bool* _aidl_return) {
+ *_aidl_return = false;
+
+ std::shared_ptr<TranscodingClientManager> owner;
+ if (mAbandoned || (owner = mOwner.lock()) == nullptr) {
+ return Status::fromServiceSpecificError(IMediaTranscodingService::ERROR_DISCONNECTED);
+ }
+
+ if (in_sessionId < 0) {
+ return Status::ok();
+ }
+
+ int32_t callingPid = AIBinder_getCallingPid();
+ int32_t callingUid = AIBinder_getCallingUid();
+
+ // Check if we can trust clientUid. Only privilege caller could add uid to existing sessions.
+ if (in_clientUid == IMediaTranscodingService::USE_CALLING_UID) {
+ in_clientUid = callingUid;
+ } else if (in_clientUid < 0) {
+ return Status::ok();
+ } else if (in_clientUid != callingUid && !owner->isTrustedCaller(callingPid, callingUid)) {
+ ALOGE("addClientUid rejected (clientUid %d) "
+ "(don't trust callingUid %d)",
+ in_clientUid, callingUid);
+ return STATUS_ERROR_FMT(IMediaTranscodingService::ERROR_PERMISSION_DENIED,
+ "addClientUid rejected (clientUid %d) "
+ "(don't trust callingUid %d)",
+ in_clientUid, callingUid);
+ }
+
+ *_aidl_return = owner->mSessionController->addClientUid(mClientId, in_sessionId, in_clientUid);
+ return Status::ok();
+}
+
+Status TranscodingClientManager::ClientImpl::getClientUids(
+ int32_t in_sessionId, std::optional<std::vector<int32_t>>* _aidl_return) {
+ *_aidl_return = std::nullopt;
+
+ std::shared_ptr<TranscodingClientManager> owner;
+ if (mAbandoned || (owner = mOwner.lock()) == nullptr) {
+ return Status::fromServiceSpecificError(IMediaTranscodingService::ERROR_DISCONNECTED);
+ }
+
+ if (in_sessionId < 0) {
+ return Status::ok();
+ }
+
+ std::vector<int32_t> result;
+
+ if (owner->mSessionController->getClientUids(mClientId, in_sessionId, &result)) {
+ *_aidl_return = result;
+ }
+ return Status::ok();
+}
+
Status TranscodingClientManager::ClientImpl::unregister() {
bool abandoned = mAbandoned.exchange(true);
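
Editor's note: a client-side sketch of the new addClientUid() entry point, assuming the NDK AIDL backend; the client binder and sessionId are assumed to come from the normal registration/submit flow. Passing USE_CALLING_UID lets the service substitute the caller's own uid; any other uid is only accepted from a trusted caller:

    bool attachSelfToSession(
            const std::shared_ptr<aidl::android::media::ITranscodingClient>& client,
            int32_t sessionId) {
        bool added = false;
        ::ndk::ScopedAStatus status = client->addClientUid(
                sessionId,
                aidl::android::media::IMediaTranscodingService::USE_CALLING_UID,
                &added);
        return status.isOk() && added;
    }
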
diff --git a/media/libmediatranscoding/TranscodingSessionController.cpp b/media/libmediatranscoding/TranscodingSessionController.cpp
index aeabe0f..9705f3c 100644
--- a/media/libmediatranscoding/TranscodingSessionController.cpp
+++ b/media/libmediatranscoding/TranscodingSessionController.cpp
@@ -19,6 +19,7 @@
#define VALIDATE_STATE 1
+#include <android/permission_manager.h>
#include <inttypes.h>
#include <media/TranscodingSessionController.h>
#include <media/TranscodingUidPolicy.h>
@@ -193,8 +194,9 @@
~Pacer() = default;
+ bool onSessionStarted(uid_t uid, uid_t callingUid);
void onSessionCompleted(uid_t uid, std::chrono::microseconds runningTime);
- bool onSessionStarted(uid_t uid);
+ void onSessionCancelled(uid_t uid);
private:
// Threshold of time between finish/start below which a back-to-back start is counted.
@@ -205,26 +207,60 @@
int32_t mBurstTimeQuotaSec;
struct UidHistoryEntry {
- std::chrono::steady_clock::time_point lastCompletedTime;
+ bool sessionActive = false;
int32_t burstCount = 0;
std::chrono::steady_clock::duration burstDuration{0};
+ std::chrono::steady_clock::time_point lastCompletedTime;
};
std::map<uid_t, UidHistoryEntry> mUidHistoryMap;
+ std::unordered_set<uid_t> mMtpUids;
+ std::unordered_set<uid_t> mNonMtpUids;
+
+ bool isSubjectToQuota(uid_t uid, uid_t callingUid);
};
-void TranscodingSessionController::Pacer::onSessionCompleted(
- uid_t uid, std::chrono::microseconds runningTime) {
- if (mUidHistoryMap.find(uid) == mUidHistoryMap.end()) {
- mUidHistoryMap.emplace(uid, UidHistoryEntry{});
+bool TranscodingSessionController::Pacer::isSubjectToQuota(uid_t uid, uid_t callingUid) {
+ // Submitting with self uid is not limited (which can only happen if it's used as an
+ // app-facing API). MediaProvider always submits on behalf of other uids.
+ if (uid == callingUid) {
+ return false;
}
- mUidHistoryMap[uid].lastCompletedTime = std::chrono::steady_clock::now();
- mUidHistoryMap[uid].burstCount++;
- mUidHistoryMap[uid].burstDuration += runningTime;
+
+ if (mMtpUids.find(uid) != mMtpUids.end()) {
+ return false;
+ }
+
+ if (mNonMtpUids.find(uid) != mNonMtpUids.end()) {
+ return true;
+ }
+
+ // We don't have MTP permission info about this uid yet, check permission and save the result.
+ int32_t result;
+ if (__builtin_available(android __TRANSCODING_MIN_API__, *)) {
+ if (APermissionManager_checkPermission("android.permission.ACCESS_MTP", -1 /*pid*/, uid,
+ &result) == PERMISSION_MANAGER_STATUS_OK &&
+ result == PERMISSION_MANAGER_PERMISSION_GRANTED) {
+ mMtpUids.insert(uid);
+ return false;
+ }
+ }
+
+ mNonMtpUids.insert(uid);
+ return true;
}
-bool TranscodingSessionController::Pacer::onSessionStarted(uid_t uid) {
- // If uid doesn't exist, this uid has no completed sessions. Skip.
+bool TranscodingSessionController::Pacer::onSessionStarted(uid_t uid, uid_t callingUid) {
+ if (!isSubjectToQuota(uid, callingUid)) {
+ ALOGI("Pacer::onSessionStarted: uid %d (caling uid: %d): not subject to quota", uid,
+ callingUid);
+ return true;
+ }
+
+ // If uid doesn't exist, only insert the entry and mark session active. Skip quota checking.
if (mUidHistoryMap.find(uid) == mUidHistoryMap.end()) {
+ mUidHistoryMap.emplace(uid, UidHistoryEntry{});
+ mUidHistoryMap[uid].sessionActive = true;
+ ALOGV("Pacer::onSessionStarted: uid %d: new", uid);
return true;
}
@@ -236,25 +272,55 @@
std::chrono::steady_clock::now() - mUidHistoryMap[uid].lastCompletedTime;
if (mUidHistoryMap[uid].burstCount >= mBurstCountQuota &&
mUidHistoryMap[uid].burstDuration >= std::chrono::seconds(mBurstTimeQuotaSec)) {
- ALOGW("Pacer: uid %d: over quota, burst count %d, time %lldms", uid,
- mUidHistoryMap[uid].burstCount, (long long)mUidHistoryMap[uid].burstDuration.count());
+ ALOGW("Pacer::onSessionStarted: uid %d: over quota, burst count %d, time %lldms", uid,
+ mUidHistoryMap[uid].burstCount,
+ (long long)mUidHistoryMap[uid].burstDuration.count() / 1000000);
return false;
}
// If not over quota, allow the session, and reset as long as this is not too close
// to previous completion.
if (timeSinceLastComplete > std::chrono::milliseconds(mBurstThresholdMs)) {
- ALOGV("Pacer: uid %d: reset quota", uid);
+ ALOGV("Pacer::onSessionStarted: uid %d: reset quota", uid);
mUidHistoryMap[uid].burstCount = 0;
mUidHistoryMap[uid].burstDuration = std::chrono::milliseconds(0);
} else {
- ALOGV("Pacer: uid %d: burst count %d, time %lldms", uid, mUidHistoryMap[uid].burstCount,
- (long long)mUidHistoryMap[uid].burstDuration.count());
+ ALOGV("Pacer::onSessionStarted: uid %d: burst count %d, time %lldms", uid,
+ mUidHistoryMap[uid].burstCount,
+ (long long)mUidHistoryMap[uid].burstDuration.count() / 1000000);
}
+ mUidHistoryMap[uid].sessionActive = true;
return true;
}
+void TranscodingSessionController::Pacer::onSessionCompleted(
+ uid_t uid, std::chrono::microseconds runningTime) {
+ // Skip quota update if this uid missed the start. (Could happen if the uid is added via
+ // addClientUid() after the session start.)
+ if (mUidHistoryMap.find(uid) == mUidHistoryMap.end() || !mUidHistoryMap[uid].sessionActive) {
+ ALOGV("Pacer::onSessionCompleted: uid %d: not started", uid);
+ return;
+ }
+ ALOGV("Pacer::onSessionCompleted: uid %d: runningTime %lld", uid, runningTime.count() / 1000);
+ mUidHistoryMap[uid].sessionActive = false;
+ mUidHistoryMap[uid].burstCount++;
+ mUidHistoryMap[uid].burstDuration += runningTime;
+ mUidHistoryMap[uid].lastCompletedTime = std::chrono::steady_clock::now();
+}
+
+void TranscodingSessionController::Pacer::onSessionCancelled(uid_t uid) {
+ if (mUidHistoryMap.find(uid) == mUidHistoryMap.end()) {
+ ALOGV("Pacer::onSessionCancelled: uid %d: not present", uid);
+ return;
+ }
+ // This is only called if a uid is removed from a session (because the uid was killed,
+ // or the original submitting client went away but the session was kept for offline use).
+ // Since the uid is going to miss the onSessionCompleted(), we can't track this
+ // session, and have to check back at next onSessionStarted().
+ mUidHistoryMap[uid].sessionActive = false;
+}
+
///////////////////////////////////////////////////////////////////////////////
TranscodingSessionController::TranscodingSessionController(
@@ -372,6 +438,14 @@
}
uid_t topUid = *mUidSortedList.begin();
+ // If the current session is running, and it's in the topUid's queue, let it continue
+ // to run even if it's not the earliest in that uid's queue.
+ // For example, uid(B) is added to a session while it's pending in uid(A)'s queue, then
+ // B is brought to front which caused the session to run, then user switches back to A.
+ if (mCurrentSession != nullptr && mCurrentSession->getState() == Session::RUNNING &&
+ mCurrentSession->allClientUids.count(topUid) > 0) {
+ return mCurrentSession;
+ }
SessionKeyType topSessionKey = *mSessionQueues[topUid].begin();
return &mSessionMap[topSessionKey];
}
@@ -427,7 +501,7 @@
void TranscodingSessionController::updateCurrentSession_l() {
Session* curSession = mCurrentSession;
- Session* topSession = getTopSession_l();
+ Session* topSession = nullptr;
// Delayed init of transcoder and watchdog.
if (mTranscoder == nullptr) {
@@ -458,9 +532,18 @@
// Otherwise, ensure topSession is running.
if (topSession->getState() == Session::NOT_STARTED) {
- if (!mPacer->onSessionStarted(topSession->clientUid)) {
- // Unfortunately this uid is out of quota for new sessions.
- // Drop this sesion and try another one.
+ // Check if at least one client has quota to start the session.
+ bool keepForClient = false;
+ for (uid_t uid : topSession->allClientUids) {
+ if (mPacer->onSessionStarted(uid, topSession->callingUid)) {
+ keepForClient = true;
+ // DO NOT break here, because book-keeping still needs to happen
+ // for the other uids.
+ }
+ }
+ if (!keepForClient) {
+ // Unfortunately all uids requesting this session are out of quota.
+ // Drop this session and try the next one.
{
auto clientCallback = mSessionMap[topSession->key].callback.lock();
if (clientCallback != nullptr) {
@@ -484,8 +567,35 @@
mCurrentSession = topSession;
}
-void TranscodingSessionController::removeSession_l(const SessionKeyType& sessionKey,
- Session::State finalState) {
+void TranscodingSessionController::addUidToSession_l(uid_t clientUid,
+ const SessionKeyType& sessionKey) {
+ // If it's an offline session, the queue was already added in constructor.
+ // If it's a real-time sessions, check if a queue is already present for the uid,
+ // and add a new queue if needed.
+ if (clientUid != OFFLINE_UID) {
+ if (mSessionQueues.count(clientUid) == 0) {
+ mUidPolicy->registerMonitorUid(clientUid);
+ if (mUidPolicy->isUidOnTop(clientUid)) {
+ mUidSortedList.push_front(clientUid);
+ } else {
+ // Shouldn't be submitting real-time requests from non-top app,
+ // put it in front of the offline queue.
+ mUidSortedList.insert(mOfflineUidIterator, clientUid);
+ }
+ } else if (clientUid != *mUidSortedList.begin()) {
+ if (mUidPolicy->isUidOnTop(clientUid)) {
+ mUidSortedList.remove(clientUid);
+ mUidSortedList.push_front(clientUid);
+ }
+ }
+ }
+ // Append this session to the uid's queue.
+ mSessionQueues[clientUid].push_back(sessionKey);
+}
+
+void TranscodingSessionController::removeSession_l(
+ const SessionKeyType& sessionKey, Session::State finalState,
+ const std::shared_ptr<std::function<bool(uid_t uid)>>& keepUid) {
ALOGV("%s: session %s", __FUNCTION__, sessionToString(sessionKey).c_str());
if (mSessionMap.count(sessionKey) == 0) {
@@ -494,26 +604,48 @@
}
// Remove session from uid's queue.
- const uid_t uid = mSessionMap[sessionKey].clientUid;
- SessionQueueType& sessionQueue = mSessionQueues[uid];
- auto it = std::find(sessionQueue.begin(), sessionQueue.end(), sessionKey);
- if (it == sessionQueue.end()) {
- ALOGE("couldn't find session %s in queue for uid %d", sessionToString(sessionKey).c_str(),
- uid);
- return;
+ bool uidQueueRemoved = false;
+ std::unordered_set<uid_t> remainingUids;
+ for (uid_t uid : mSessionMap[sessionKey].allClientUids) {
+ if (keepUid != nullptr) {
+ if ((*keepUid)(uid)) {
+ remainingUids.insert(uid);
+ continue;
+ }
+ // If we have uids to keep, the session is not going to any final state,
+ // so we can't use onSessionCompleted() as the running time will not be
+ // valid. Only notify the pacer to stop tracking this session.
+ mPacer->onSessionCancelled(uid);
+ }
+ SessionQueueType& sessionQueue = mSessionQueues[uid];
+ auto it = std::find(sessionQueue.begin(), sessionQueue.end(), sessionKey);
+ if (it == sessionQueue.end()) {
+ ALOGW("couldn't find session %s in queue for uid %d",
+ sessionToString(sessionKey).c_str(), uid);
+ continue;
+ }
+ sessionQueue.erase(it);
+
+ // If this is the last session in a real-time queue, remove this uid's queue.
+ if (uid != OFFLINE_UID && sessionQueue.empty()) {
+ mUidSortedList.remove(uid);
+ mSessionQueues.erase(uid);
+ mUidPolicy->unregisterMonitorUid(uid);
+
+ uidQueueRemoved = true;
+ }
}
- sessionQueue.erase(it);
- // If this is the last session in a real-time queue, remove this uid's queue.
- if (uid != OFFLINE_UID && sessionQueue.empty()) {
- mUidSortedList.remove(uid);
- mSessionQueues.erase(uid);
- mUidPolicy->unregisterMonitorUid(uid);
-
+ if (uidQueueRemoved) {
std::unordered_set<uid_t> topUids = mUidPolicy->getTopUids();
moveUidsToTop_l(topUids, false /*preserveTopUid*/);
}
+ if (keepUid != nullptr) {
+ mSessionMap[sessionKey].allClientUids = remainingUids;
+ return;
+ }
+
// Clear current session.
if (mCurrentSession == &mSessionMap[sessionKey]) {
mCurrentSession = nullptr;
@@ -521,9 +653,10 @@
setSessionState_l(&mSessionMap[sessionKey], finalState);
- if (finalState == Session::FINISHED || finalState == Session::ERROR) {
- mPacer->onSessionCompleted(mSessionMap[sessionKey].clientUid,
- mSessionMap[sessionKey].runningTime);
+ // We can use onSessionCompleted() even for CANCELLED, because runningTime is
+ // now updated by setSessionState_l().
+ for (uid_t uid : mSessionMap[sessionKey].allClientUids) {
+ mPacer->onSessionCompleted(uid, mSessionMap[sessionKey].runningTime);
}
mSessionHistory.push_back(mSessionMap[sessionKey]);
@@ -617,34 +750,13 @@
// Add session to session map.
mSessionMap[sessionKey].key = sessionKey;
- mSessionMap[sessionKey].clientUid = clientUid;
mSessionMap[sessionKey].callingUid = callingUid;
+ mSessionMap[sessionKey].allClientUids.insert(clientUid);
mSessionMap[sessionKey].request = request;
mSessionMap[sessionKey].callback = callback;
setSessionState_l(&mSessionMap[sessionKey], Session::NOT_STARTED);
- // If it's an offline session, the queue was already added in constructor.
- // If it's a real-time sessions, check if a queue is already present for the uid,
- // and add a new queue if needed.
- if (clientUid != OFFLINE_UID) {
- if (mSessionQueues.count(clientUid) == 0) {
- mUidPolicy->registerMonitorUid(clientUid);
- if (mUidPolicy->isUidOnTop(clientUid)) {
- mUidSortedList.push_front(clientUid);
- } else {
- // Shouldn't be submitting real-time requests from non-top app,
- // put it in front of the offline queue.
- mUidSortedList.insert(mOfflineUidIterator, clientUid);
- }
- } else if (clientUid != *mUidSortedList.begin()) {
- if (mUidPolicy->isUidOnTop(clientUid)) {
- mUidSortedList.remove(clientUid);
- mUidSortedList.push_front(clientUid);
- }
- }
- }
- // Append this session to the uid's queue.
- mSessionQueues[clientUid].push_back(sessionKey);
+ addUidToSession_l(clientUid, sessionKey);
updateCurrentSession_l();
@@ -657,14 +769,20 @@
ALOGV("%s: session %s", __FUNCTION__, sessionToString(sessionKey).c_str());
- std::list<SessionKeyType> sessionsToRemove;
+ std::list<SessionKeyType> sessionsToRemove, sessionsForOffline;
std::scoped_lock lock{mLock};
if (sessionId < 0) {
for (auto it = mSessionMap.begin(); it != mSessionMap.end(); ++it) {
- if (it->first.first == clientId && it->second.clientUid != OFFLINE_UID) {
- sessionsToRemove.push_back(it->first);
+ if (it->first.first == clientId) {
+ // If there is offline request, only keep the offline client;
+ // otherwise remove the session.
+ if (it->second.allClientUids.count(OFFLINE_UID) > 0) {
+ sessionsForOffline.push_back(it->first);
+ } else {
+ sessionsToRemove.push_back(it->first);
+ }
}
}
} else {
@@ -688,6 +806,12 @@
removeSession_l(*it, Session::CANCELED);
}
+ auto keepUid = std::make_shared<std::function<bool(uid_t)>>(
+ [](uid_t uid) { return uid == OFFLINE_UID; });
+ for (auto it = sessionsForOffline.begin(); it != sessionsForOffline.end(); ++it) {
+ removeSession_l(*it, Session::CANCELED, keepUid);
+ }
+
// Start next session.
updateCurrentSession_l();
@@ -695,6 +819,51 @@
return true;
}
+bool TranscodingSessionController::addClientUid(ClientIdType clientId, SessionIdType sessionId,
+ uid_t clientUid) {
+ SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
+
+ std::scoped_lock lock{mLock};
+
+ if (mSessionMap.count(sessionKey) == 0) {
+ ALOGE("session %s doesn't exist", sessionToString(sessionKey).c_str());
+ return false;
+ }
+
+ if (mSessionMap[sessionKey].allClientUids.count(clientUid) > 0) {
+ ALOGE("session %s already has uid %d", sessionToString(sessionKey).c_str(), clientUid);
+ return false;
+ }
+
+ mSessionMap[sessionKey].allClientUids.insert(clientUid);
+ addUidToSession_l(clientUid, sessionKey);
+
+ updateCurrentSession_l();
+
+ validateState_l();
+ return true;
+}
+
+bool TranscodingSessionController::getClientUids(ClientIdType clientId, SessionIdType sessionId,
+ std::vector<int32_t>* out_clientUids) {
+ SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
+
+ std::scoped_lock lock{mLock};
+
+ if (mSessionMap.count(sessionKey) == 0) {
+ ALOGE("session %s doesn't exist", sessionToString(sessionKey).c_str());
+ return false;
+ }
+
+ out_clientUids->clear();
+ for (uid_t uid : mSessionMap[sessionKey].allClientUids) {
+ if (uid != OFFLINE_UID) {
+ out_clientUids->push_back(uid);
+ }
+ }
+ return true;
+}
+
bool TranscodingSessionController::getSession(ClientIdType clientId, SessionIdType sessionId,
TranscodingRequestParcel* request) {
SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
@@ -886,6 +1055,58 @@
validateState_l();
}
+void TranscodingSessionController::onUidGone(uid_t goneUid) {
+ ALOGD("%s: gone uid %u", __FUNCTION__, goneUid);
+
+ std::list<SessionKeyType> sessionsToRemove, sessionsForOtherUids;
+
+ std::scoped_lock lock{mLock};
+
+ for (auto it = mSessionMap.begin(); it != mSessionMap.end(); ++it) {
+ if (it->second.allClientUids.count(goneUid) > 0) {
+ // If goneUid is the only uid, remove the session; otherwise, only
+ // remove the uid from the session.
+ if (it->second.allClientUids.size() > 1) {
+ sessionsForOtherUids.push_back(it->first);
+ } else {
+ sessionsToRemove.push_back(it->first);
+ }
+ }
+ }
+
+ for (auto it = sessionsToRemove.begin(); it != sessionsToRemove.end(); ++it) {
+ // If the session has ever been started, stop it now.
+ // Note that stop() is needed even if the session is currently paused. This instructs
+ // the transcoder to discard any states for the session, otherwise the states may
+ // never be discarded.
+ if (mSessionMap[*it].getState() != Session::NOT_STARTED) {
+ mTranscoder->stop(it->first, it->second);
+ }
+
+ {
+ auto clientCallback = mSessionMap[*it].callback.lock();
+ if (clientCallback != nullptr) {
+ clientCallback->onTranscodingFailed(it->second,
+ TranscodingErrorCode::kUidGoneCancelled);
+ }
+ }
+
+ // Remove the session.
+ removeSession_l(*it, Session::CANCELED);
+ }
+
+ auto keepUid = std::make_shared<std::function<bool(uid_t)>>(
+ [goneUid](uid_t uid) { return uid != goneUid; });
+ for (auto it = sessionsForOtherUids.begin(); it != sessionsForOtherUids.end(); ++it) {
+ removeSession_l(*it, Session::CANCELED, keepUid);
+ }
+
+ // Start next session.
+ updateCurrentSession_l();
+
+ validateState_l();
+}
+
void TranscodingSessionController::onResourceAvailable() {
std::scoped_lock lock{mLock};
@@ -938,7 +1159,8 @@
LOG_ALWAYS_FATAL_IF(*mOfflineUidIterator != OFFLINE_UID,
"mOfflineUidIterator not pointing to offline uid");
LOG_ALWAYS_FATAL_IF(mUidSortedList.size() != mSessionQueues.size(),
- "mUidList and mSessionQueues size mismatch");
+ "mUidSortedList and mSessionQueues size mismatch, %zu vs %zu",
+ mUidSortedList.size(), mSessionQueues.size());
int32_t totalSessions = 0;
for (auto uid : mUidSortedList) {
@@ -952,8 +1174,14 @@
totalSessions += mSessionQueues[uid].size();
}
- LOG_ALWAYS_FATAL_IF(mSessionMap.size() != totalSessions,
- "mSessions size doesn't match total sessions counted from uid queues");
+ int32_t totalSessionsAlternative = 0;
+ for (auto const& s : mSessionMap) {
+ totalSessionsAlternative += s.second.allClientUids.size();
+ }
+ LOG_ALWAYS_FATAL_IF(totalSessions != totalSessionsAlternative,
+ "session count (including dup) from mSessionQueues doesn't match that from "
+ "mSessionMap, %d vs %d",
+ totalSessions, totalSessionsAlternative);
#endif // VALIDATE_STATE
}
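
Editor's note: a standalone sketch of the Pacer's burst-quota rule used above; the names are illustrative stand-ins, not the controller's real fields. A uid is rejected once it has accumulated both the burst-count and burst-time quotas, and the counters reset whenever a new start is far enough from the previous completion. Uids not subject to quota (self-submitted sessions, or holders of android.permission.ACCESS_MTP) bypass this check entirely:

    #include <chrono>

    struct BurstHistory {
        int burstCount = 0;
        std::chrono::steady_clock::duration burstDuration{0};
        std::chrono::steady_clock::time_point lastCompletedTime;
    };

    // Returns true if a new session may start for this uid.
    bool mayStart(BurstHistory& h, int countQuota, std::chrono::seconds timeQuota,
                  std::chrono::milliseconds backToBackThreshold) {
        auto sinceLastComplete = std::chrono::steady_clock::now() - h.lastCompletedTime;
        if (h.burstCount >= countQuota && h.burstDuration >= timeQuota) {
            return false;                       // over quota: drop this start
        }
        if (sinceLastComplete > backToBackThreshold) {
            h.burstCount = 0;                   // long gap: reset the burst window
            h.burstDuration = std::chrono::milliseconds(0);
        }
        return true;
    }
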
diff --git a/media/libmediatranscoding/TranscodingUidPolicy.cpp b/media/libmediatranscoding/TranscodingUidPolicy.cpp
index b5eb028..0a1ffbc 100644
--- a/media/libmediatranscoding/TranscodingUidPolicy.cpp
+++ b/media/libmediatranscoding/TranscodingUidPolicy.cpp
@@ -141,38 +141,34 @@
}
void TranscodingUidPolicy::onUidStateChanged(uid_t uid, int32_t procState) {
- ALOGV("onUidStateChanged: %u, procState %d", uid, procState);
+ ALOGV("onUidStateChanged: uid %u, procState %d", uid, procState);
bool topUidSetChanged = false;
+ bool isUidGone = false;
std::unordered_set<uid_t> topUids;
{
Mutex::Autolock _l(mUidLock);
auto it = mUidStateMap.find(uid);
if (it != mUidStateMap.end() && it->second != procState) {
- // Top set changed if 1) the uid is in the current top uid set, or 2) the
- // new procState is at least the same priority as the current top uid state.
- bool isUidCurrentTop =
- mTopUidState != IMPORTANCE_UNKNOWN && mStateUidMap[mTopUidState].count(uid) > 0;
- bool isNewStateHigherThanTop =
- procState != IMPORTANCE_UNKNOWN &&
- (procState <= mTopUidState || mTopUidState == IMPORTANCE_UNKNOWN);
- topUidSetChanged = (isUidCurrentTop || isNewStateHigherThanTop);
+ isUidGone = (procState == AACTIVITYMANAGER_IMPORTANCE_GONE);
+
+ topUids = mStateUidMap[mTopUidState];
// Move uid to the new procState.
mStateUidMap[it->second].erase(uid);
mStateUidMap[procState].insert(uid);
it->second = procState;
- if (topUidSetChanged) {
- updateTopUid_l();
-
+ updateTopUid_l();
+ if (topUids != mStateUidMap[mTopUidState]) {
// Make a copy of the uid set for callback.
topUids = mStateUidMap[mTopUidState];
+ topUidSetChanged = true;
}
}
}
- ALOGV("topUidSetChanged: %d", topUidSetChanged);
+ ALOGV("topUidSetChanged: %d, isUidGone %d", topUidSetChanged, isUidGone);
if (topUidSetChanged) {
auto callback = mUidPolicyCallback.lock();
@@ -180,6 +176,12 @@
callback->onTopUidsChanged(topUids);
}
}
+ if (isUidGone) {
+ auto callback = mUidPolicyCallback.lock();
+ if (callback != nullptr) {
+ callback->onUidGone(uid);
+ }
+ }
}
void TranscodingUidPolicy::updateTopUid_l() {
diff --git a/media/libmediatranscoding/aidl/android/media/ITranscodingClient.aidl b/media/libmediatranscoding/aidl/android/media/ITranscodingClient.aidl
index 151e3d0..9ef9052 100644
--- a/media/libmediatranscoding/aidl/android/media/ITranscodingClient.aidl
+++ b/media/libmediatranscoding/aidl/android/media/ITranscodingClient.aidl
@@ -55,6 +55,32 @@
boolean getSessionWithId(in int sessionId, out TranscodingSessionParcel session);
/**
+ * Add an additional client uid requesting a session.
+ *
+ * @sessionId the session id to which to add the additional client uid.
+ * @clientUid the additional client uid to be added.
+ * @return false if the session doesn't exist or the client is already requesting the
+ * session, true otherwise.
+ */
+ boolean addClientUid(in int sessionId, int clientUid);
+
+ /**
+ * Retrieves the (unsorted) list of all clients requesting a session.
+ *
+ * Note that if a session was submitted with offline priority (
+ * TranscodingSessionPriority::kUnspecified), it initially will not be considered requested
+ * by any particular client, because the client could go away any time after the submission.
+ * However, additional uids could be added via addClientUid() after the submission, which
+ * essentially makes the request a real-time request instead of an offline request.
+ *
+ * @sessionId the session id for which to retrieve the client uid list.
+ * @return the list of client uids requesting the session, or null if the session
+ * doesn't exist.
+ */
+ @nullable
+ int[] getClientUids(in int sessionId);
+
+ /**
* Unregister the client with the MediaTranscodingService.
*
* Client will not be able to perform any more transcoding after unregister.
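The new addClientUid()/getClientUids() methods above are exercised by the client manager tests later in this change; a hedged usage sketch against the generated native proxy, where `client`, `sessionId` and the uid value are placeholders:

// Sketch: attach the calling uid to a session, then read back the requesting uids.
// getClientUids() yields std::nullopt when the session does not exist.
bool added = false;
std::optional<std::vector<int32_t>> clientUids;
client->addClientUid(sessionId, IMediaTranscodingService::USE_CALLING_UID, &added);
if (added) {
    client->getClientUids(sessionId, &clientUids);
}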
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingErrorCode.aidl b/media/libmediatranscoding/aidl/android/media/TranscodingErrorCode.aidl
index 5349fe1..fdd86c7 100644
--- a/media/libmediatranscoding/aidl/android/media/TranscodingErrorCode.aidl
+++ b/media/libmediatranscoding/aidl/android/media/TranscodingErrorCode.aidl
@@ -38,4 +38,5 @@
kErrorIO = kPrivateErrorFirst + 5,
kInsufficientResources = kPrivateErrorFirst + 6,
kWatchdogTimeout = kPrivateErrorFirst + 7,
+ kUidGoneCancelled = kPrivateErrorFirst + 8,
}
\ No newline at end of file
diff --git a/media/libmediatranscoding/include/media/ControllerClientInterface.h b/media/libmediatranscoding/include/media/ControllerClientInterface.h
index 0d13607..9311e2e 100644
--- a/media/libmediatranscoding/include/media/ControllerClientInterface.h
+++ b/media/libmediatranscoding/include/media/ControllerClientInterface.h
@@ -60,6 +60,29 @@
virtual bool getSession(ClientIdType clientId, SessionIdType sessionId,
TranscodingRequestParcel* request) = 0;
+ /**
+ * Add an additional client uid requesting the session identified by <clientId, sessionId>.
+ *
+ * Returns false if the session doesn't exist, or the client is already requesting the
+ * session. Returns true otherwise.
+ */
+ virtual bool addClientUid(ClientIdType clientId, SessionIdType sessionId, uid_t clientUid);
+
+ /**
+ * Retrieves the (unsorted) list of all clients requesting the session identified by
+ * <clientId, sessionId>.
+ *
+ * Note that if a session was submitted with offline priority
+ * (TranscodingSessionPriority::kUnspecified), it will not initially be considered
+ * requested by any particular client, because the client could go away at any time
+ * after the submission. However, additional uids can be added via addClientUid()
+ * after the submission, which essentially makes the request a real-time request
+ * instead of an offline one.
+ *
+ * Returns false if the session doesn't exist. Returns true otherwise.
+ */
+ virtual bool getClientUids(ClientIdType clientId, SessionIdType sessionId,
+ std::vector<int32_t>* out_clientUids);
+
protected:
virtual ~ControllerClientInterface() = default;
};
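For orientation, a holder of this interface (for example the client manager) would drive the two new entry points roughly as follows; `controller`, the ids and `extraUid` are placeholders, not names from this change:

// Sketch: promote a session by attaching an extra requesting uid, then query the
// full list. Both calls simply return false for a session that does not exist.
std::vector<int32_t> clientUids;
if (controller->addClientUid(clientId, sessionId, extraUid) &&
    controller->getClientUids(clientId, sessionId, &clientUids)) {
    // clientUids now lists every uid whose presence keeps the session real-time.
}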
diff --git a/media/libmediatranscoding/include/media/TranscodingSessionController.h b/media/libmediatranscoding/include/media/TranscodingSessionController.h
index b2d6f0a..2691201 100644
--- a/media/libmediatranscoding/include/media/TranscodingSessionController.h
+++ b/media/libmediatranscoding/include/media/TranscodingSessionController.h
@@ -54,6 +54,9 @@
bool cancel(ClientIdType clientId, SessionIdType sessionId) override;
bool getSession(ClientIdType clientId, SessionIdType sessionId,
TranscodingRequestParcel* request) override;
+ bool addClientUid(ClientIdType clientId, SessionIdType sessionId, uid_t clientUid) override;
+ bool getClientUids(ClientIdType clientId, SessionIdType sessionId,
+ std::vector<int32_t>* out_clientUids) override;
// ~ControllerClientInterface
// TranscoderCallbackInterface
@@ -70,6 +73,7 @@
// UidPolicyCallbackInterface
void onTopUidsChanged(const std::unordered_set<uid_t>& uids) override;
+ void onUidGone(uid_t goneUid) override;
// ~UidPolicyCallbackInterface
// ResourcePolicyCallbackInterface
@@ -120,8 +124,8 @@
DROPPED_BY_PACER,
};
SessionKeyType key;
- uid_t clientUid;
uid_t callingUid;
+ std::unordered_set<uid_t> allClientUids;
int32_t lastProgress = 0;
int32_t pauseCount = 0;
std::chrono::time_point<std::chrono::steady_clock> stateEnterTime;
@@ -184,7 +188,9 @@
void dumpSession_l(const Session& session, String8& result, bool closedSession = false);
Session* getTopSession_l();
void updateCurrentSession_l();
- void removeSession_l(const SessionKeyType& sessionKey, Session::State finalState);
+ void addUidToSession_l(uid_t uid, const SessionKeyType& sessionKey);
+ void removeSession_l(const SessionKeyType& sessionKey, Session::State finalState,
+ const std::shared_ptr<std::function<bool(uid_t uid)>>& keepUid = nullptr);
void moveUidsToTop_l(const std::unordered_set<uid_t>& uids, bool preserveTopUid);
void setSessionState_l(Session* session, Session::State state);
void notifyClient(ClientIdType clientId, SessionIdType sessionId, const char* reason,
diff --git a/media/libmediatranscoding/include/media/UidPolicyInterface.h b/media/libmediatranscoding/include/media/UidPolicyInterface.h
index 05d8db0..445a2ff 100644
--- a/media/libmediatranscoding/include/media/UidPolicyInterface.h
+++ b/media/libmediatranscoding/include/media/UidPolicyInterface.h
@@ -48,6 +48,9 @@
// has changed. The receiver of this callback should adjust accordingly.
virtual void onTopUidsChanged(const std::unordered_set<uid_t>& uids) = 0;
+ // Called when a uid is gone.
+ virtual void onUidGone(uid_t goneUid) = 0;
+
protected:
virtual ~UidPolicyCallbackInterface() = default;
};
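As a rough illustration of the new callback, the reaction sketched below is modeled on the session controller tests added in this change and is an assumption, not part of the interface contract; mSessionMap, Session and cancelSession() are illustrative placeholders:

// Sketch of one possible onUidGone() handler: drop the gone uid from every session
// it was requesting, and cancel sessions left with no requesting uid at all.
void onUidGone(uid_t goneUid) {
    for (auto& [sessionKey, session] : mSessionMap) {
        session.allClientUids.erase(goneUid);
        if (session.allClientUids.empty()) {
            cancelSession(sessionKey, TranscodingErrorCode::kUidGoneCancelled);
        }
    }
}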
diff --git a/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp b/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp
index 57a2e27..9233410 100644
--- a/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp
+++ b/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp
@@ -50,6 +50,7 @@
constexpr const char* kClientName = "TestClientName";
constexpr const char* kClientPackage = "TestClientPackage";
+constexpr uid_t OFFLINE_UID = -1;
#define SESSION(n) (n)
@@ -135,8 +136,8 @@
virtual ~TestController() { ALOGI("TestController Destroyed"); }
- bool submit(ClientIdType clientId, SessionIdType sessionId, uid_t /*callingUid*/, uid_t /*uid*/,
- const TranscodingRequestParcel& request,
+ bool submit(ClientIdType clientId, SessionIdType sessionId, uid_t /*callingUid*/,
+ uid_t clientUid, const TranscodingRequestParcel& request,
const std::weak_ptr<ITranscodingClientCallback>& clientCallback) override {
SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
if (mSessions.count(sessionKey) > 0) {
@@ -149,13 +150,47 @@
return false;
}
+ if (request.priority == TranscodingSessionPriority::kUnspecified) {
+ clientUid = OFFLINE_UID;
+ }
+
mSessions[sessionKey].request = request;
mSessions[sessionKey].callback = clientCallback;
+ mSessions[sessionKey].allClientUids.insert(clientUid);
mLastSession = sessionKey;
return true;
}
+ bool addClientUid(ClientIdType clientId, SessionIdType sessionId, uid_t clientUid) override {
+ SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
+
+ if (mSessions.count(sessionKey) == 0) {
+ return false;
+ }
+ if (mSessions[sessionKey].allClientUids.count(clientUid) > 0) {
+ return false;
+ }
+ mSessions[sessionKey].allClientUids.insert(clientUid);
+ return true;
+ }
+
+ bool getClientUids(ClientIdType clientId, SessionIdType sessionId,
+ std::vector<int32_t>* out_clientUids) override {
+ SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
+
+ if (mSessions.count(sessionKey) == 0) {
+ return false;
+ }
+ out_clientUids->clear();
+ for (uid_t uid : mSessions[sessionKey].allClientUids) {
+ if (uid != OFFLINE_UID) {
+ out_clientUids->push_back(uid);
+ }
+ }
+ return true;
+ }
+
bool cancel(ClientIdType clientId, SessionIdType sessionId) override {
SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
@@ -211,6 +246,7 @@
struct Session {
TranscodingRequest request;
std::weak_ptr<ITranscodingClientCallback> callback;
+ std::unordered_set<uid_t> allClientUids;
};
typedef std::pair<ClientIdType, SessionIdType> SessionKeyType;
@@ -537,4 +573,93 @@
EXPECT_EQ(status.getServiceSpecificError(), IMediaTranscodingService::ERROR_DISCONNECTED);
}
+TEST_F(TranscodingClientManagerTest, TestAddGetClientUidsInvalidArgs) {
+ addMultipleClients();
+
+ bool result;
+ std::optional<std::vector<int32_t>> clientUids;
+ TranscodingRequestParcel request;
+ TranscodingSessionParcel session;
+ uid_t ownUid = ::getuid();
+
+ // Add/Get clients with invalid session id fails.
+ EXPECT_TRUE(mClient1->addClientUid(-1, ownUid, &result).isOk());
+ EXPECT_FALSE(result);
+ EXPECT_TRUE(mClient1->addClientUid(SESSION(0), ownUid, &result).isOk());
+ EXPECT_FALSE(result);
+ EXPECT_TRUE(mClient1->getClientUids(-1, &clientUids).isOk());
+ EXPECT_EQ(clientUids, std::nullopt);
+ EXPECT_TRUE(mClient1->getClientUids(SESSION(0), &clientUids).isOk());
+ EXPECT_EQ(clientUids, std::nullopt);
+
+ unregisterMultipleClients();
+}
+
+TEST_F(TranscodingClientManagerTest, TestAddGetClientUids) {
+ addMultipleClients();
+
+ bool result;
+ std::optional<std::vector<int32_t>> clientUids;
+ TranscodingRequestParcel request;
+ TranscodingSessionParcel session;
+ uid_t ownUid = ::getuid();
+
+ // Submit one real-time session.
+ request.sourceFilePath = "test_source_file_0";
+ request.destinationFilePath = "test_destination_file_0";
+ request.priority = TranscodingSessionPriority::kNormal;
+ EXPECT_TRUE(mClient1->submitRequest(request, &session, &result).isOk());
+ EXPECT_TRUE(result);
+
+ // Should have own uid in client uid list.
+ EXPECT_TRUE(mClient1->getClientUids(SESSION(0), &clientUids).isOk());
+ EXPECT_NE(clientUids, std::nullopt);
+ EXPECT_EQ(clientUids->size(), 1);
+ EXPECT_EQ((*clientUids)[0], ownUid);
+
+ // Adding invalid client uid should fail.
+ EXPECT_TRUE(mClient1->addClientUid(SESSION(0), kInvalidClientUid, &result).isOk());
+ EXPECT_FALSE(result);
+
+ // Adding own uid again should fail.
+ EXPECT_TRUE(mClient1->addClientUid(SESSION(0), ownUid, &result).isOk());
+ EXPECT_FALSE(result);
+
+ // Submit one offline session.
+ request.sourceFilePath = "test_source_file_1";
+ request.destinationFilePath = "test_desintaion_file_1";
+ request.priority = TranscodingSessionPriority::kUnspecified;
+ EXPECT_TRUE(mClient1->submitRequest(request, &session, &result).isOk());
+ EXPECT_TRUE(result);
+
+ // Should not have own uid in client uid list.
+ EXPECT_TRUE(mClient1->getClientUids(SESSION(1), &clientUids).isOk());
+ EXPECT_NE(clientUids, std::nullopt);
+ EXPECT_EQ(clientUids->size(), 0);
+
+ // Add own uid (via IMediaTranscodingService::USE_CALLING_UID), should succeed.
+ EXPECT_TRUE(
+ mClient1->addClientUid(SESSION(1), IMediaTranscodingService::USE_CALLING_UID, &result)
+ .isOk());
+ EXPECT_TRUE(result);
+ EXPECT_TRUE(mClient1->getClientUids(SESSION(1), &clientUids).isOk());
+ EXPECT_NE(clientUids, std::nullopt);
+ EXPECT_EQ(clientUids->size(), 1);
+ EXPECT_EQ((*clientUids)[0], ownUid);
+
+ // Add more uids, should succeed.
+ int32_t kFakeUid = ::getuid() ^ 0x1;
+ EXPECT_TRUE(mClient1->addClientUid(SESSION(1), kFakeUid, &result).isOk());
+ EXPECT_TRUE(result);
+ EXPECT_TRUE(mClient1->getClientUids(SESSION(1), &clientUids).isOk());
+ EXPECT_NE(clientUids, std::nullopt);
+ std::unordered_set<uid_t> uidSet;
+ uidSet.insert(clientUids->begin(), clientUids->end());
+ EXPECT_EQ(uidSet.size(), 2);
+ EXPECT_EQ(uidSet.count(ownUid), 1);
+ EXPECT_EQ(uidSet.count(kFakeUid), 1);
+
+ unregisterMultipleClients();
+}
+
} // namespace android
diff --git a/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp b/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
index 560d1fe..ef9c4f8 100644
--- a/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
+++ b/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
@@ -234,11 +234,14 @@
}
struct TestClientCallback : public BnTranscodingClientCallback {
- TestClientCallback(TestTranscoder* owner, int64_t clientId)
- : mOwner(owner), mClientId(clientId) {
+ TestClientCallback(TestTranscoder* owner, ClientIdType clientId, uid_t clientUid)
+ : mOwner(owner), mClientId(clientId), mClientUid(clientUid) {
ALOGD("TestClient Created");
}
+ ClientIdType clientId() const { return mClientId; }
+ uid_t clientUid() const { return mClientUid; }
+
Status openFileDescriptor(const std::string& /*in_fileUri*/, const std::string& /*in_mode*/,
::ndk::ScopedFileDescriptor* /*_aidl_return*/) override {
return Status::ok();
@@ -277,7 +280,8 @@
private:
TestTranscoder* mOwner;
- int64_t mClientId;
+ ClientIdType mClientId;
+ uid_t mClientUid;
TestClientCallback(const TestClientCallback&) = delete;
TestClientCallback& operator=(const TestClientCallback&) = delete;
};
@@ -313,14 +317,14 @@
// Set priority only, ignore other fields for now.
mOfflineRequest.priority = TranscodingSessionPriority::kUnspecified;
mRealtimeRequest.priority = TranscodingSessionPriority::kHigh;
- mClientCallback0 =
- ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(), CLIENT(0));
- mClientCallback1 =
- ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(), CLIENT(1));
- mClientCallback2 =
- ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(), CLIENT(2));
- mClientCallback3 =
- ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(), CLIENT(3));
+ mClientCallback0 = ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(),
+ CLIENT(0), UID(0));
+ mClientCallback1 = ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(),
+ CLIENT(1), UID(1));
+ mClientCallback2 = ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(),
+ CLIENT(2), UID(2));
+ mClientCallback3 = ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(),
+ CLIENT(3), UID(3));
}
void TearDown() override { ALOGI("TranscodingSessionControllerTest tear down"); }
@@ -335,36 +339,71 @@
EXPECT_EQ(mTranscoder.use_count(), 2);
}
+ void testPacerHelper(int numSubmits, int sessionDurationMs, int expectedSuccess) {
+ testPacerHelper(numSubmits, sessionDurationMs, expectedSuccess, mClientCallback0, {},
+ false /*pauseLastSuccessSession*/, true /*useRealCallingUid*/);
+ }
+
+ void testPacerHelperWithPause(int numSubmits, int sessionDurationMs, int expectedSuccess) {
+ testPacerHelper(numSubmits, sessionDurationMs, expectedSuccess, mClientCallback0, {},
+ true /*pauseLastSuccessSession*/, true /*useRealCallingUid*/);
+ }
+
+ void testPacerHelperWithMultipleUids(int numSubmits, int sessionDurationMs, int expectedSuccess,
+ const std::shared_ptr<TestClientCallback>& client,
+ const std::vector<int>& additionalClientUids) {
+ testPacerHelper(numSubmits, sessionDurationMs, expectedSuccess, client,
+ additionalClientUids, false /*pauseLastSuccessSession*/,
+ true /*useRealCallingUid*/);
+ }
+
+ void testPacerHelperWithSelfUid(int numSubmits, int sessionDurationMs, int expectedSuccess) {
+ testPacerHelper(numSubmits, sessionDurationMs, expectedSuccess, mClientCallback0, {},
+ false /*pauseLastSuccessSession*/, false /*useRealCallingUid*/);
+ }
+
void testPacerHelper(int numSubmits, int sessionDurationMs, int expectedSuccess,
- bool pauseLastSuccessSession = false) {
+ const std::shared_ptr<TestClientCallback>& client,
+ const std::vector<int>& additionalClientUids, bool pauseLastSuccessSession,
+ bool useRealCallingUid) {
+ uid_t callingUid = useRealCallingUid ? ::getuid() : client->clientUid();
for (int i = 0; i < numSubmits; i++) {
- mController->submit(CLIENT(0), SESSION(i), UID(0), UID(0),
- mRealtimeRequest, mClientCallback0);
+ mController->submit(client->clientId(), SESSION(i), callingUid, client->clientUid(),
+ mRealtimeRequest, client);
+ for (int additionalUid : additionalClientUids) {
+ mController->addClientUid(client->clientId(), SESSION(i), additionalUid);
+ }
}
for (int i = 0; i < expectedSuccess; i++) {
- EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(i)));
+ EXPECT_EQ(mTranscoder->popEvent(),
+ TestTranscoder::Start(client->clientId(), SESSION(i)));
if ((i == expectedSuccess - 1) && pauseLastSuccessSession) {
// Insert a pause of 3 sec to the last success running session
mController->onThrottlingStarted();
- EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(i)));
+ EXPECT_EQ(mTranscoder->popEvent(),
+ TestTranscoder::Pause(client->clientId(), SESSION(i)));
sleep(3);
mController->onThrottlingStopped();
- EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(0), SESSION(i)));
+ EXPECT_EQ(mTranscoder->popEvent(),
+ TestTranscoder::Resume(client->clientId(), SESSION(i)));
}
usleep(sessionDurationMs * 1000);
// Test half of Finish and half of Error, both should be counted as burst runs.
if (i & 1) {
- mController->onFinish(CLIENT(0), SESSION(i));
- EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Finished(CLIENT(0), SESSION(i)));
+ mController->onFinish(client->clientId(), SESSION(i));
+ EXPECT_EQ(mTranscoder->popEvent(),
+ TestTranscoder::Finished(client->clientId(), SESSION(i)));
} else {
- mController->onError(CLIENT(0), SESSION(i), TranscodingErrorCode::kUnknown);
+ mController->onError(client->clientId(), SESSION(i),
+ TranscodingErrorCode::kUnknown);
EXPECT_EQ(mTranscoder->popEvent(100000),
- TestTranscoder::Failed(CLIENT(0), SESSION(i)));
+ TestTranscoder::Failed(client->clientId(), SESSION(i)));
EXPECT_EQ(mTranscoder->getLastError(), TranscodingErrorCode::kUnknown);
}
}
for (int i = expectedSuccess; i < numSubmits; i++) {
- EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Failed(CLIENT(0), SESSION(i)));
+ EXPECT_EQ(mTranscoder->popEvent(),
+ TestTranscoder::Failed(client->clientId(), SESSION(i)));
EXPECT_EQ(mTranscoder->getLastError(), TranscodingErrorCode::kDroppedByService);
}
}
@@ -470,6 +509,83 @@
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Stop(CLIENT(0), SESSION(3)));
}
+TEST_F(TranscodingSessionControllerTest, TestCancelSessionWithMultipleUids) {
+ ALOGD("TestCancelSessionWithMultipleUids");
+ std::vector<int32_t> clientUids;
+
+ // Submit real-time session SESSION(0), should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ // Submit real-time session SESSION(1), should not start.
+ mController->submit(CLIENT(0), SESSION(1), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Submit offline session SESSION(2), should not start.
+ mController->submit(CLIENT(0), SESSION(2), UID(0), UID(0), mOfflineRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // UID(1) moves to top.
+ mUidPolicy->setTop(UID(1));
+
+ // Add UID(1) to the offline SESSION(2), SESSION(2) should start and SESSION(0) should pause.
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(2), UID(1)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(2)));
+
+ // Add UID(1) to SESSION(1) as well.
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(1), UID(1)));
+
+ // Cancel SESSION(2), should be cancelled and SESSION(1) should start.
+ EXPECT_TRUE(mController->cancel(CLIENT(0), SESSION(2)));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(2), &clientUids));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Stop(CLIENT(0), SESSION(2)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(1)));
+
+ // Cancel SESSION(1), should be cancelled and SESSION(0) should resume.
+ EXPECT_TRUE(mController->cancel(CLIENT(0), SESSION(1)));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(1), &clientUids));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Stop(CLIENT(0), SESSION(1)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(0), SESSION(0)));
+}
+
+TEST_F(TranscodingSessionControllerTest, TestCancelAllSessionsForClient) {
+ // Submit real-time session SESSION(0), should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ // Submit real-time session SESSION(1), should not start.
+ mController->submit(CLIENT(0), SESSION(1), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Submit offline session SESSION(2), should not start.
+ mController->submit(CLIENT(0), SESSION(2), UID(0), UID(0), mOfflineRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ std::vector<int32_t> clientUids;
+ // Make some more uids blocked on the sessions.
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(0), UID(1)));
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(1), UID(1)));
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(2), UID(1)));
+ EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(0), &clientUids));
+ EXPECT_EQ(clientUids.size(), 2);
+ EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(1), &clientUids));
+ EXPECT_EQ(clientUids.size(), 2);
+ EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(2), &clientUids));
+ EXPECT_EQ(clientUids.size(), 1);
+
+ // Cancel all sessions for CLIENT(0) with -1.
+ // Expect SESSION(0) and SESSION(1) to be gone.
+ // Expect SESSION(2) to remain with an empty client uid list (kept only for offline) and to start.
+ EXPECT_TRUE(mController->cancel(CLIENT(0), -1));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Stop(CLIENT(0), SESSION(0)));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(0), &clientUids));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(1), &clientUids));
+ EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(2), &clientUids));
+ EXPECT_EQ(clientUids.size(), 0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(2)));
+}
+
TEST_F(TranscodingSessionControllerTest, TestFinishSession) {
ALOGD("TestFinishSession");
@@ -527,6 +643,45 @@
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
}
+TEST_F(TranscodingSessionControllerTest, TestFinishSessionWithMultipleUids) {
+ ALOGD("TestFinishSessionWithMultipleUids");
+ std::vector<int32_t> clientUids;
+
+ // Start with unspecified top uid.
+ // Submit real-time session SESSION(0), should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ // Submit real-time session SESSION(1), should not start.
+ mController->submit(CLIENT(0), SESSION(1), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(1), UID(1)));
+
+ // Submit real-time session SESSION(2), should not start.
+ mController->submit(CLIENT(0), SESSION(2), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(2), UID(1)));
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(2), UID(2)));
+
+ // UID(1) moves to top.
+ mUidPolicy->setTop(UID(1));
+ // SESSION(0) should pause, SESSION(1) should start.
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(1)));
+
+ // Finish SESSION(1), SESSION(2) (next in line for UID(1)) should start.
+ mController->onFinish(CLIENT(0), SESSION(1));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Finished(CLIENT(0), SESSION(1)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(2)));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(1), &clientUids));
+
+ // Finish SESSION(2), SESSION(0) should resume.
+ mController->onFinish(CLIENT(0), SESSION(2));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Finished(CLIENT(0), SESSION(2)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(0), SESSION(0)));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(2), &clientUids));
+}
+
TEST_F(TranscodingSessionControllerTest, TestFailSession) {
ALOGD("TestFailSession");
@@ -588,6 +743,49 @@
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
}
+TEST_F(TranscodingSessionControllerTest, TestFailSessionWithMultipleUids) {
+ ALOGD("TestFailSessionWithMultipleUids");
+ std::vector<int32_t> clientUids;
+
+ // Start with unspecified top uid.
+ // Submit real-time session SESSION(0), should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ // Submit real-time session SESSION(1), should not start.
+ mController->submit(CLIENT(0), SESSION(1), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(1), UID(1)));
+
+ // Submit real-time session SESSION(2), should not start.
+ mController->submit(CLIENT(0), SESSION(2), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // UID(1) moves to top.
+ mUidPolicy->setTop(UID(1));
+ // SESSION(0) should pause, SESSION(1) should start.
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(1)));
+
+ // Add UID(1) and UID(2) to SESSION(2).
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(2), UID(1)));
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(2), UID(2)));
+
+ // Fail SESSION(1), SESSION(2) (next in line for UID(1)) should start.
+ mController->onError(CLIENT(0), SESSION(1), TranscodingErrorCode::kUnknown);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Failed(CLIENT(0), SESSION(1)));
+ EXPECT_EQ(mTranscoder->getLastError(), TranscodingErrorCode::kUnknown);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(2)));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(1), &clientUids));
+
+ // Fail SESSION(2), SESSION(0) should resume.
+ mController->onError(CLIENT(0), SESSION(2), TranscodingErrorCode::kInvalidOperation);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Failed(CLIENT(0), SESSION(2)));
+ EXPECT_EQ(mTranscoder->getLastError(), TranscodingErrorCode::kInvalidOperation);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(0), SESSION(0)));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(2), &clientUids));
+}
+
TEST_F(TranscodingSessionControllerTest, TestTopUidChanged) {
ALOGD("TestTopUidChanged");
@@ -630,8 +828,59 @@
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(1), SESSION(0)));
}
+TEST_F(TranscodingSessionControllerTest, TestTopUidChangedMultipleUids) {
+ ALOGD("TestTopUidChangedMultipleUids");
+
+ // Start with unspecified top UID.
+ // Submit real-time session to CLIENT(0), session should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ // Submit offline session to CLIENT(1), should not start.
+ mController->submit(CLIENT(1), SESSION(0), UID(1), UID(0), mOfflineRequest, mClientCallback1);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Bring UID(1) to top.
+ mUidPolicy->setTop(UID(1));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Add UID(1) to SESSION(0), SESSION(0) should continue to run
+ // (no pause&resume of the same session).
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(0), UID(1)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Bring UID(0) back to top, SESSION(0) should continue to run
+ // (no pause&resume of the same session).
+ mUidPolicy->setTop(UID(0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Bring UID(2) to top.
+ mUidPolicy->setTop(UID(2));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+ // Add UID(2) to the offline session, it should be started.
+ EXPECT_TRUE(mController->addClientUid(CLIENT(1), SESSION(0), UID(2)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(1), SESSION(0)));
+
+ // Add UID(3) to CLIENT(0)'s SESSION(0).
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(0), UID(3)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+ // Bring UID(3) to top; CLIENT(1)'s SESSION(0) should pause and CLIENT(0)'s SESSION(0) should resume.
+ mUidPolicy->setTop(UID(3));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(1), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(0), SESSION(0)));
+
+ // Now make UID(2) also blocked on CLIENT(0), SESSION(0).
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(0), UID(2)));
+
+ // Bring UID(2) back to top, CLIENT(0), SESSION(0) should continue to run (even if it's
+ // added to UID(2)'s queue later than CLIENT(1)'s SESSION(0)).
+ mUidPolicy->setTop(UID(2));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+}
+
TEST_F(TranscodingSessionControllerTest, TestTopUidSetChanged) {
- ALOGD("TestTopUidChanged_MultipleUids");
+ ALOGD("TestTopUidSetChanged");
// Start with unspecified top UID.
// Submit real-time session to CLIENT(0), session should start immediately.
@@ -684,6 +933,100 @@
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(1), SESSION(0)));
}
+TEST_F(TranscodingSessionControllerTest, TestUidGone) {
+ ALOGD("TestUidGone");
+
+ mUidPolicy->setTop(UID(0));
+ // Start with UID(0) as the top uid.
+ // Submit real-time sessions to CLIENT(0), the first one should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ mController->submit(CLIENT(0), SESSION(1), UID(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(1), UID(1)));
+
+ // Submit offline session to CLIENT(1), should not start.
+ mController->submit(CLIENT(1), SESSION(0), UID(1), UID(1), mOfflineRequest, mClientCallback1);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+ EXPECT_TRUE(mController->addClientUid(CLIENT(1), SESSION(0), UID(1)));
+
+ // UID(1) moves to top; CLIENT(0)'s SESSION(0) should pause and SESSION(1) should start.
+ mUidPolicy->setTop(UID(1));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(1)));
+ // Tell the controller that UID(0) is gone; CLIENT(0)'s SESSION(0) should be cancelled.
+ mController->onUidGone(UID(0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Stop(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Failed(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+
+ std::vector<int32_t> clientUids;
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(0), &clientUids));
+ EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(1), &clientUids));
+ EXPECT_EQ(clientUids.size(), 1);
+ EXPECT_EQ(clientUids[0], UID(1));
+
+ // Tell the controller that UID(1) is gone too.
+ mController->onUidGone(UID(1));
+ // CLIENT(1)'s SESSION(0) should start, CLIENT(0)'s SESSION(1) should be cancelled.
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Stop(CLIENT(0), SESSION(1)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Failed(CLIENT(0), SESSION(1)));
+ EXPECT_EQ(mTranscoder->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(1), SESSION(0)));
+ // CLIENT(1) SESSION(0) should not have any client uids as it's only kept for offline.
+ EXPECT_TRUE(mController->getClientUids(CLIENT(1), SESSION(0), &clientUids));
+ EXPECT_EQ(clientUids.size(), 0);
+}
+
+TEST_F(TranscodingSessionControllerTest, TestAddGetClientUids) {
+ ALOGD("TestAddGetClientUids");
+
+ // Add/get client uids with non-existent session, should fail.
+ std::vector<int32_t> clientUids;
+ uid_t ownUid = ::getuid();
+ EXPECT_FALSE(mController->addClientUid(CLIENT(0), SESSION(0), ownUid));
+ EXPECT_FALSE(mController->getClientUids(CLIENT(0), SESSION(0), &clientUids));
+
+ // Submit a real-time request.
+ EXPECT_TRUE(mController->submit(CLIENT(0), SESSION(0), UID(0), UID(0), mRealtimeRequest,
+ mClientCallback0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ // Should have own uid in client uids.
+ EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(0), &clientUids));
+ EXPECT_EQ(clientUids.size(), 1);
+ EXPECT_EQ(clientUids[0], UID(0));
+
+ // Add UID(0) again should fail.
+ EXPECT_FALSE(mController->addClientUid(CLIENT(0), SESSION(0), UID(0)));
+
+ // Add own uid should succeed.
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(0), ownUid));
+ EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(0), &clientUids));
+ std::unordered_set<uid_t> uidSet;
+ uidSet.insert(clientUids.begin(), clientUids.end());
+ EXPECT_EQ(uidSet.size(), 2);
+ EXPECT_EQ(uidSet.count(UID(0)), 1);
+ EXPECT_EQ(uidSet.count(ownUid), 1);
+
+ // Submit an offline request.
+ EXPECT_TRUE(mController->submit(CLIENT(0), SESSION(1), UID(0), UID(0), mOfflineRequest,
+ mClientCallback0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Should not have own uid in client uids.
+ EXPECT_TRUE(mController->getClientUids(CLIENT(0), SESSION(1), &clientUids));
+ EXPECT_EQ(clientUids.size(), 0);
+
+ // Move UID(1) to top.
+ mUidPolicy->setTop(UID(1));
+ // Add UID(1) to offline session, offline session should start and SESSION(0) should pause.
+ EXPECT_TRUE(mController->addClientUid(CLIENT(0), SESSION(1), UID(1)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(1)));
+}
+
/* Test resource lost without thermal throttling */
TEST_F(TranscodingSessionControllerTest, TestResourceLost) {
ALOGD("TestResourceLost");
@@ -969,8 +1312,36 @@
TEST_F(TranscodingSessionControllerTest, TestTranscoderPacerWithPause) {
ALOGD("TestTranscoderPacerDuringPause");
- testPacerHelper(12 /*numSubmits*/, 400 /*sessionDurationMs*/, 10 /*expectedSuccess*/,
- true /*pauseLastSuccessSession*/);
+ testPacerHelperWithPause(12 /*numSubmits*/, 400 /*sessionDurationMs*/, 10 /*expectedSuccess*/);
+}
+
+/*
+ * Test the case where multiple client uids request the same session. Session should only
+ * be dropped when all clients are over quota.
+ */
+TEST_F(TranscodingSessionControllerTest, TestTranscoderPacerMultipleUids) {
+ ALOGD("TestTranscoderPacerMultipleUids");
+ // First, run mClientCallback0 to the point of no quota.
+ testPacerHelperWithMultipleUids(12 /*numSubmits*/, 400 /*sessionDurationMs*/,
+ 10 /*expectedSuccess*/, mClientCallback0, {});
+ // Make UID(0) block on Client1's sessions too, Client1's quota should not be affected.
+ testPacerHelperWithMultipleUids(12 /*numSubmits*/, 400 /*sessionDurationMs*/,
+ 10 /*expectedSuccess*/, mClientCallback1, {UID(0)});
+ // Make UID(10) block on Client2's sessions. We expect to see 11 successes (instead of 10),
+ // because addClientUid() is called after the submit and the first session has already
+ // started by the time UID(10) is added. UID(10) allows us to run the 11th session; after
+ // that, both UID(10) and UID(2) are out of quota.
+ testPacerHelperWithMultipleUids(12 /*numSubmits*/, 400 /*sessionDurationMs*/,
+ 11 /*expectedSuccess*/, mClientCallback2, {UID(10)});
+}
+
+/*
+ * Use same uid for clientUid and callingUid, should not be limited by quota.
+ */
+TEST_F(TranscodingSessionControllerTest, TestTranscoderPacerSelfUid) {
+ ALOGD("TestTranscoderPacerSelfUid");
+ testPacerHelperWithSelfUid(12 /*numSubmits*/, 400 /*sessionDurationMs*/,
+ 12 /*expectedSuccess*/);
}
} // namespace android
diff --git a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
index 4405180..d56bec0 100644
--- a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
@@ -220,16 +220,15 @@
return AMEDIA_ERROR_INVALID_PARAMETER;
}
- int32_t bitrate;
- if (!AMediaFormat_getInt32(encoderFormat, AMEDIAFORMAT_KEY_BIT_RATE, &bitrate)) {
- status = mMediaSampleReader->getEstimatedBitrateForTrack(mTrackIndex, &bitrate);
+ if (!AMediaFormat_getInt32(encoderFormat, AMEDIAFORMAT_KEY_BIT_RATE, &mConfiguredBitrate)) {
+ status = mMediaSampleReader->getEstimatedBitrateForTrack(mTrackIndex, &mConfiguredBitrate);
if (status != AMEDIA_OK) {
LOG(ERROR) << "Unable to estimate bitrate. Using default " << kDefaultBitrateMbps;
- bitrate = kDefaultBitrateMbps;
+ mConfiguredBitrate = kDefaultBitrateMbps;
}
- LOG(INFO) << "Configuring bitrate " << bitrate;
- AMediaFormat_setInt32(encoderFormat, AMEDIAFORMAT_KEY_BIT_RATE, bitrate);
+ LOG(INFO) << "Configuring bitrate " << mConfiguredBitrate;
+ AMediaFormat_setInt32(encoderFormat, AMEDIAFORMAT_KEY_BIT_RATE, mConfiguredBitrate);
}
SetDefaultFormatValueFloat(AMEDIAFORMAT_KEY_I_FRAME_INTERVAL, encoderFormat,
diff --git a/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h b/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h
index 8a506a0..3e72882 100644
--- a/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h
+++ b/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h
@@ -45,6 +45,7 @@
private:
friend struct AsyncCodecCallbackDispatch;
+ friend class VideoTrackTranscoderTests;
// Minimal blocking queue used as a message queue by VideoTrackTranscoder.
template <typename T>
@@ -101,6 +102,7 @@
uid_t mUid;
uint64_t mInputFrameCount = 0;
uint64_t mOutputFrameCount = 0;
+ int32_t mConfiguredBitrate = 0;
};
} // namespace android
diff --git a/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml b/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml
index e40a507..c3a0ced 100644
--- a/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml
+++ b/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml
@@ -24,7 +24,7 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp" />
<option name="module-name" value="{MODULE}" />
- <option name="native-test-timeout" value="10m" />
+ <option name="native-test-timeout" value="30m" />
</test>
</configuration>
diff --git a/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp b/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp
index 1f9ec77..88c3fd3 100644
--- a/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp
+++ b/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp
@@ -86,6 +86,10 @@
~VideoTrackTranscoderTests() { LOG(DEBUG) << "VideoTrackTranscoderTests destroyed"; }
+ static int32_t getConfiguredBitrate(const std::shared_ptr<VideoTrackTranscoder>& transcoder) {
+ return transcoder->mConfiguredBitrate;
+ }
+
std::shared_ptr<MediaSampleReader> mMediaSampleReader;
int mTrackIndex;
std::shared_ptr<AMediaFormat> mSourceFormat;
@@ -140,7 +144,7 @@
TEST_F(VideoTrackTranscoderTests, PreserveBitrate) {
LOG(DEBUG) << "Testing PreserveBitrate";
auto callback = std::make_shared<TestTrackTranscoderCallback>();
- std::shared_ptr<MediaTrackTranscoder> transcoder = VideoTrackTranscoder::create(callback);
+ auto transcoder = VideoTrackTranscoder::create(callback);
auto destFormat = TrackTranscoderTestUtils::getDefaultVideoDestinationFormat(
mSourceFormat.get(), false /* includeBitrate*/);
@@ -155,15 +159,11 @@
ASSERT_TRUE(transcoder->start());
callback->waitUntilTrackFormatAvailable();
-
- auto outputFormat = transcoder->getOutputFormat();
- ASSERT_NE(outputFormat, nullptr);
-
transcoder->stop();
EXPECT_EQ(callback->waitUntilFinished(), AMEDIA_OK);
- int32_t outBitrate;
- EXPECT_TRUE(AMediaFormat_getInt32(outputFormat.get(), AMEDIAFORMAT_KEY_BIT_RATE, &outBitrate));
+ int32_t outBitrate = getConfiguredBitrate(transcoder);
+ ASSERT_GT(outBitrate, 0);
EXPECT_EQ(srcBitrate, outBitrate);
}
diff --git a/media/libnbaio/AudioStreamInSource.cpp b/media/libnbaio/AudioStreamInSource.cpp
index 1054b68..ca98b28 100644
--- a/media/libnbaio/AudioStreamInSource.cpp
+++ b/media/libnbaio/AudioStreamInSource.cpp
@@ -46,13 +46,11 @@
status_t result;
result = mStream->getBufferSize(&mStreamBufferSizeBytes);
if (result != OK) return result;
- audio_format_t streamFormat;
- uint32_t sampleRate;
- audio_channel_mask_t channelMask;
- result = mStream->getAudioProperties(&sampleRate, &channelMask, &streamFormat);
+ audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ result = mStream->getAudioProperties(&config);
if (result != OK) return result;
- mFormat = Format_from_SR_C(sampleRate,
- audio_channel_count_from_in_mask(channelMask), streamFormat);
+ mFormat = Format_from_SR_C(config.sample_rate,
+ audio_channel_count_from_in_mask(config.channel_mask), config.format);
mFrameSize = Format_frameSize(mFormat);
}
return NBAIO_Source::negotiate(offers, numOffers, counterOffers, numCounterOffers);
diff --git a/media/libnbaio/AudioStreamOutSink.cpp b/media/libnbaio/AudioStreamOutSink.cpp
index 8564899..581867f 100644
--- a/media/libnbaio/AudioStreamOutSink.cpp
+++ b/media/libnbaio/AudioStreamOutSink.cpp
@@ -44,13 +44,11 @@
status_t result;
result = mStream->getBufferSize(&mStreamBufferSizeBytes);
if (result != OK) return result;
- audio_format_t streamFormat;
- uint32_t sampleRate;
- audio_channel_mask_t channelMask;
- result = mStream->getAudioProperties(&sampleRate, &channelMask, &streamFormat);
+ audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ result = mStream->getAudioProperties(&config);
if (result != OK) return result;
- mFormat = Format_from_SR_C(sampleRate,
- audio_channel_count_from_out_mask(channelMask), streamFormat);
+ mFormat = Format_from_SR_C(config.sample_rate,
+ audio_channel_count_from_out_mask(config.channel_mask), config.format);
mFrameSize = Format_frameSize(mFormat);
}
return NBAIO_Sink::negotiate(offers, numOffers, counterOffers, numCounterOffers);
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 52434b3..d6e36b9 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -274,6 +274,7 @@
"MPEG2TSWriter.cpp",
"MPEG4Writer.cpp",
"MediaAdapter.cpp",
+ "MediaAppender.cpp",
"MediaClock.cpp",
"MediaCodec.cpp",
"MediaCodecList.cpp",
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 01190b5..0fd4ef2 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -113,7 +113,7 @@
return NULL;
}
sp<IMemory> frameMem = new MemoryBase(heap, 0, size);
- if (frameMem == NULL) {
+ if (frameMem == NULL || frameMem->unsecurePointer() == NULL) {
ALOGE("not enough memory for VideoFrame size=%zu", size);
return NULL;
}
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 76a5cab..5c39239 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -519,12 +519,12 @@
mSendNotify = false;
mWriteSeekErr = false;
mFallocateErr = false;
-
// Reset following variables for all the sessions and they will be
// initialized in start(MetaData *param).
mIsRealTimeRecording = true;
mUse4ByteNalLength = true;
mOffset = 0;
+ mMaxOffsetAppend = 0;
mPreAllocateFileEndOffset = 0;
mMdatOffset = 0;
mMdatEndOffset = 0;
@@ -992,6 +992,19 @@
seekOrPostError(mFd, mFreeBoxOffset, SEEK_SET);
writeInt32(mInMemoryCacheSize);
write("free", 4);
+ if (mInMemoryCacheSize >= 8) {
+ off64_t bufSize = mInMemoryCacheSize - 8;
+ char* zeroBuffer = new (std::nothrow) char[bufSize];
+ if (zeroBuffer) {
+ std::fill_n(zeroBuffer, bufSize, '0');
+ writeOrPostError(mFd, zeroBuffer, bufSize);
+ delete [] zeroBuffer;
+ } else {
+ ALOGW("freebox in file isn't initialized to 0");
+ }
+ } else {
+ ALOGW("freebox size is less than 8:%" PRId64, mInMemoryCacheSize);
+ }
mMdatOffset = mFreeBoxOffset + mInMemoryCacheSize;
} else {
mMdatOffset = mOffset;
@@ -1541,6 +1554,26 @@
MediaBuffer *buffer, bool usePrefix,
uint32_t tiffHdrOffset, size_t *bytesWritten) {
off64_t old_offset = mOffset;
+ int64_t offset;
+ ALOGV("buffer->range_length:%lld", (long long)buffer->range_length());
+ if (buffer->meta_data().findInt64(kKeySampleFileOffset, &offset)) {
+ ALOGV("offset:%lld, old_offset:%lld", (long long)offset, (long long)old_offset);
+ if (old_offset == offset) {
+ mOffset += buffer->range_length();
+ } else {
+ ALOGV("offset and old_offset are not equal! diff:%lld", (long long)offset - old_offset);
+ mOffset = offset + buffer->range_length();
+ // mOffset += buffer->range_length() + offset - old_offset;
+ }
+ *bytesWritten = buffer->range_length();
+ ALOGV("mOffset:%lld, mMaxOffsetAppend:%lld, bytesWritten:%lld", (long long)mOffset,
+ (long long)mMaxOffsetAppend, (long long)*bytesWritten);
+ mMaxOffsetAppend = std::max(mOffset, mMaxOffsetAppend);
+ seekOrPostError(mFd, mMaxOffsetAppend, SEEK_SET);
+ return offset;
+ }
+
+ ALOGV("mOffset:%lld, mMaxOffsetAppend:%lld", (long long)mOffset, (long long)mMaxOffsetAppend);
if (usePrefix) {
addMultipleLengthPrefixedSamples_l(buffer);
@@ -1557,6 +1590,10 @@
mOffset += buffer->range_length();
}
*bytesWritten = mOffset - old_offset;
+
+ ALOGV("mOffset:%lld, old_offset:%lld, bytesWritten:%lld", (long long)mOffset,
+ (long long)old_offset, (long long)*bytesWritten);
+
return old_offset;
}
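The branch added above means a sample whose metadata carries kKeySampleFileOffset is treated as already present in the output file: the writer only advances mOffset/mMaxOffsetAppend and seeks, instead of copying bytes. A hedged sketch of how a producer, such as the append path introduced elsewhere in this change, would tag such a buffer; sampleSize, existingFileOffset and lastSampleIndexInChunk are placeholders:

// Illustrative only: mark a sample as already living at a known file offset so that
// writeSampleData() skips the copy and only updates its bookkeeping.
MediaBuffer* sample = new MediaBuffer(sampleSize);
sample->set_range(0, sampleSize);
sample->meta_data().setInt64(kKeySampleFileOffset, existingFileOffset);
sample->meta_data().setInt64(kKeyLastSampleIndexInChunk, lastSampleIndexInChunk);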
@@ -1569,6 +1606,7 @@
(const uint8_t *)buffer->data() + buffer->range_offset();
if (!memcmp(ptr, "\x00\x00\x00\x01", 4)) {
+ ALOGV("stripping start code");
buffer->set_range(
buffer->range_offset() + 4, buffer->range_length() - 4);
}
@@ -1599,8 +1637,10 @@
}
void MPEG4Writer::addLengthPrefixedSample_l(MediaBuffer *buffer) {
+ ALOGV("alp:buffer->range_length:%lld", (long long)buffer->range_length());
size_t length = buffer->range_length();
if (mUse4ByteNalLength) {
+ ALOGV("mUse4ByteNalLength");
uint8_t x[4];
x[0] = length >> 24;
x[1] = (length >> 16) & 0xff;
@@ -1610,6 +1650,7 @@
writeOrPostError(mFd, (const uint8_t*)buffer->data() + buffer->range_offset(), length);
mOffset += length + 4;
} else {
+ ALOGV("mUse2ByteNalLength");
CHECK_LT(length, 65536u);
uint8_t x[2];
@@ -2762,6 +2803,9 @@
}
writeAllChunks();
+ ALOGV("threadFunc mOffset:%lld, mMaxOffsetAppend:%lld", (long long)mOffset,
+ (long long)mMaxOffsetAppend);
+ mOffset = std::max(mOffset, mMaxOffsetAppend);
}
status_t MPEG4Writer::startWriterThread() {
@@ -3323,6 +3367,7 @@
uint32_t lastSamplesPerChunk = 0;
int64_t lastSampleDurationUs = -1; // Duration calculated from EOS buffer and its timestamp
int64_t lastSampleDurationTicks = -1; // Timescale based ticks
+ int64_t sampleFileOffset = -1;
if (mIsAudio) {
prctl(PR_SET_NAME, (unsigned long)"MP4WtrAudTrkThread", 0, 0, 0);
@@ -3342,6 +3387,7 @@
MediaBufferBase *buffer;
const char *trackName = getTrackType();
while (!mDone && (err = mSource->read(&buffer)) == OK) {
+ ALOGV("read:buffer->range_length:%lld", (long long)buffer->range_length());
int32_t isEOS = false;
if (buffer->range_length() == 0) {
if (buffer->meta_data().findInt32(kKeyIsEndOfStream, &isEOS) && isEOS) {
@@ -3448,6 +3494,14 @@
continue;
}
}
+ if (!buffer->meta_data().findInt64(kKeySampleFileOffset, &sampleFileOffset)) {
+ sampleFileOffset = -1;
+ }
+ int64_t lastSample = -1;
+ if (!buffer->meta_data().findInt64(kKeyLastSampleIndexInChunk, &lastSample)) {
+ lastSample = -1;
+ }
+ ALOGV("sampleFileOffset:%lld", (long long)sampleFileOffset);
/*
* Reserve space in the file for the current sample + to be written MOOV box. If reservation
@@ -3455,7 +3509,7 @@
* write MOOV box successfully as space for the same was reserved in the prior call.
* Release the current buffer/sample here.
*/
- if (!mOwner->preAllocate(buffer->range_length())) {
+ if (sampleFileOffset == -1 && !mOwner->preAllocate(buffer->range_length())) {
buffer->release();
buffer = nullptr;
break;
@@ -3466,9 +3520,14 @@
// Make a deep copy of the MediaBuffer and Metadata and release
// the original as soon as we can
MediaBuffer *copy = new MediaBuffer(buffer->range_length());
- memcpy(copy->data(), (uint8_t *)buffer->data() + buffer->range_offset(),
- buffer->range_length());
+ if (sampleFileOffset != -1) {
+ copy->meta_data().setInt64(kKeySampleFileOffset, sampleFileOffset);
+ } else {
+ memcpy(copy->data(), (uint8_t*)buffer->data() + buffer->range_offset(),
+ buffer->range_length());
+ }
copy->set_range(0, buffer->range_length());
+
meta_data = new MetaData(buffer->meta_data());
buffer->release();
buffer = NULL;
@@ -3476,14 +3535,16 @@
copy->meta_data().setInt32(kKeyExifTiffOffset, tiffHdrOffset);
}
bool usePrefix = this->usePrefix() && !isExif;
-
- if (usePrefix) StripStartcode(copy);
-
+ if (sampleFileOffset == -1 && usePrefix) {
+ StripStartcode(copy);
+ }
size_t sampleSize = copy->range_length();
- if (usePrefix) {
+ if (sampleFileOffset == -1 && usePrefix) {
if (mOwner->useNalLengthFour()) {
+ ALOGV("nallength4");
sampleSize += 4;
} else {
+ ALOGV("nallength2");
sampleSize += 2;
}
}
@@ -3778,7 +3839,8 @@
chunkTimestampUs = timestampUs;
} else {
int64_t chunkDurationUs = timestampUs - chunkTimestampUs;
- if (chunkDurationUs > interleaveDurationUs) {
+ if (chunkDurationUs > interleaveDurationUs || lastSample > 1) {
+ ALOGV("lastSample:%lld", (long long)lastSample);
if (chunkDurationUs > mMaxChunkDurationUs) {
mMaxChunkDurationUs = chunkDurationUs;
}
@@ -5331,4 +5393,4 @@
endBox();
}
-} // namespace android
+} // namespace android
\ No newline at end of file
diff --git a/media/libstagefright/MediaAppender.cpp b/media/libstagefright/MediaAppender.cpp
new file mode 100644
index 0000000..5d80b30
--- /dev/null
+++ b/media/libstagefright/MediaAppender.cpp
@@ -0,0 +1,425 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaAppender"
+
+#include <media/stagefright/MediaAppender.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <utils/Log.h>
+// TODO : check if this works for NDK apps without JVM
+// #include <media/ndk/NdkJavaVMHelperPriv.h>
+
+namespace android {
+
+struct MediaAppender::sampleDataInfo {
+ size_t size;
+ int64_t time;
+ size_t exTrackIndex;
+ sp<MetaData> meta;
+};
+
+sp<MediaAppender> MediaAppender::create(int fd, AppendMode mode) {
+ if (fd < 0) {
+ ALOGE("invalid file descriptor");
+ return nullptr;
+ }
+ if (!(mode >= APPEND_MODE_FIRST && mode <= APPEND_MODE_LAST)) {
+ ALOGE("invalid mode %d", mode);
+ return nullptr;
+ }
+ sp<MediaAppender> ma = new (std::nothrow) MediaAppender(fd, mode);
+ if (ma == nullptr || ma->init() != OK) {
+ return nullptr;
+ }
+ return ma;
+}
+
+// TODO: inject mediamuxer and mediaextractor objects.
+// TODO: @format is not required as an input if we can sniff the file and find the format of
+// the existing content.
+// TODO: Code it to the interface(MediaAppender), and have a separate MediaAppender NDK
+MediaAppender::MediaAppender(int fd, AppendMode mode)
+ : mFd(fd),
+ mMode(mode),
+ // TODO : check if this works for NDK apps without JVM
+ // mExtractor(new NuMediaExtractor(NdkJavaVMHelper::getJNIEnv() != nullptr
+ // ? NuMediaExtractor::EntryPoint::NDK_WITH_JVM
+ // : NuMediaExtractor::EntryPoint::NDK_NO_JVM)),
+ mExtractor(new (std::nothrow) NuMediaExtractor(NuMediaExtractor::EntryPoint::NDK_WITH_JVM)),
+ mTrackCount(0),
+ mState(UNINITIALIZED) {
+ ALOGV("MediaAppender::MediaAppender mode:%d", mode);
+ }
+
+status_t MediaAppender::init() {
+ std::scoped_lock lock(mMutex);
+ ALOGV("MediaAppender::init");
+ status_t status = mExtractor->setDataSource(mFd, 0, lseek(mFd, 0, SEEK_END));
+ if (status != OK) {
+ ALOGE("extractor_setDataSource failed, status :%d", status);
+ return status;
+ }
+
+ if (strcmp("MPEG4Extractor", mExtractor->getName()) == 0) {
+ mFormat = MediaMuxer::OUTPUT_FORMAT_MPEG_4;
+ } else {
+ ALOGE("Unsupported format, extractor name:%s", mExtractor->getName());
+ return ERROR_UNSUPPORTED;
+ }
+
+ mTrackCount = mExtractor->countTracks();
+ ALOGV("mTrackCount:%zu", mTrackCount);
+ if (mTrackCount == 0) {
+ ALOGE("no tracks are present");
+ return ERROR_MALFORMED;
+ }
+ size_t exTrackIndex = 0;
+ ssize_t audioTrackIndex = -1, videoTrackIndex = -1;
+ bool audioSyncSampleTimeSet = false;
+
+ while (exTrackIndex < mTrackCount) {
+ sp<AMessage> fmt;
+ status = mExtractor->getTrackFormat(exTrackIndex, &fmt, 0);
+ if (status != OK) {
+ ALOGE("getTrackFormat failed for trackIndex:%zu, status:%d", exTrackIndex, status);
+ return status;
+ }
+ AString mime;
+ if (fmt->findString("mime", &mime)) {
+ if (!strncasecmp(mime.c_str(), "video/", 6)) {
+ ALOGV("VideoTrack");
+ if (videoTrackIndex != -1) {
+ ALOGE("Not more than one video track is supported");
+ return ERROR_UNSUPPORTED;
+ }
+ videoTrackIndex = exTrackIndex;
+ } else if (!strncasecmp(mime.c_str(), "audio/", 6)) {
+ ALOGV("AudioTrack");
+ if (audioTrackIndex != -1) {
+ ALOGE("Not more than one audio track is supported");
+ }
+ audioTrackIndex = exTrackIndex;
+ } else {
+ ALOGV("Neither Video nor Audio track");
+ }
+ }
+ mFmtIndexMap.emplace(exTrackIndex, fmt);
+ mSampleCountVect.emplace_back(0);
+ mMaxTimestampVect.emplace_back(0);
+ mLastSyncSampleTimeVect.emplace_back(0);
+ status = mExtractor->selectTrack(exTrackIndex);
+ if (status != OK) {
+ ALOGE("selectTrack failed for trackIndex:%zu, status:%d", exTrackIndex, status);
+ return status;
+ }
+ ++exTrackIndex;
+ }
+
+ ALOGV("AudioTrackIndex:%zu, VideoTrackIndex:%zu", audioTrackIndex, videoTrackIndex);
+
+ do {
+ sampleDataInfo tmpSDI;
+ // TODO: read info into members of the struct sampleDataInfo directly
+ size_t sampleSize;
+ status = mExtractor->getSampleSize(&sampleSize);
+ if (status != OK) {
+ ALOGE("getSampleSize failed, status:%d", status);
+ return status;
+ }
+ mSampleSizeVect.emplace_back(sampleSize);
+ tmpSDI.size = sampleSize;
+ int64_t sampleTime = 0;
+ status = mExtractor->getSampleTime(&sampleTime);
+ if (status != OK) {
+ ALOGE("getSampleTime failed, status:%d", status);
+ return status;
+ }
+ mSampleTimeVect.emplace_back(sampleTime);
+ tmpSDI.time = sampleTime;
+ status = mExtractor->getSampleTrackIndex(&exTrackIndex);
+ if (status != OK) {
+ ALOGE("getSampleTrackIndex failed, status:%d", status);
+ return status;
+ }
+ mSampleIndexVect.emplace_back(exTrackIndex);
+ tmpSDI.exTrackIndex = exTrackIndex;
+ ++mSampleCountVect[exTrackIndex];
+ mMaxTimestampVect[exTrackIndex] = std::max(mMaxTimestampVect[exTrackIndex], sampleTime);
+ sp<MetaData> sampleMeta;
+ status = mExtractor->getSampleMeta(&sampleMeta);
+ if (status != OK) {
+ ALOGE("getSampleMeta failed, status:%d", status);
+ return status;
+ }
+ mSampleMetaVect.emplace_back(sampleMeta);
+ int32_t val = 0;
+ if (sampleMeta->findInt32(kKeyIsSyncFrame, &val) && val != 0) {
+ mLastSyncSampleTimeVect[exTrackIndex] = sampleTime;
+ }
+ tmpSDI.meta = sampleMeta;
+ mSDI.emplace_back(tmpSDI);
+ } while (mExtractor->advance() == OK);
+
+ mExtractor.clear();
+
+ std::sort(mSDI.begin(), mSDI.end(), [](sampleDataInfo& a, sampleDataInfo& b) {
+ int64_t aOffset = 0, bOffset = 0;
+ a.meta->findInt64(kKeySampleFileOffset, &aOffset);
+ b.meta->findInt64(kKeySampleFileOffset, &bOffset);
+ return aOffset < bOffset;
+ });
+ for (int64_t syncSampleTime : mLastSyncSampleTimeVect) {
+ ALOGV("before ignoring frames, mLastSyncSampleTimeVect:%lld", (long long)syncSampleTime);
+ }
+ ALOGV("mMode:%u", mMode);
+ if (mMode == APPEND_MODE_IGNORE_LAST_VIDEO_GOP && videoTrackIndex != -1) {
+ ALOGV("Video track is present");
+ bool lastVideoIframe = false;
+ size_t lastVideoIframeOffset = 0;
+ int64_t lastVideoSampleTime = -1;
+ for (auto rItr = mSDI.rbegin(); rItr != mSDI.rend(); ++rItr) {
+ if (rItr->exTrackIndex != videoTrackIndex) {
+ continue;
+ }
+ if (lastVideoSampleTime == -1) {
+ lastVideoSampleTime = rItr->time;
+ }
+ int64_t offset = 0;
+ if (!rItr->meta->findInt64(kKeySampleFileOffset, &offset) || offset == 0) {
+ ALOGE("Missing offset");
+ return ERROR_MALFORMED;
+ }
+ ALOGV("offset:%lld", (long long)offset);
+ int32_t val = 0;
+ if (rItr->meta->findInt32(kKeyIsSyncFrame, &val) && val != 0) {
+ ALOGV("sampleTime:%lld", (long long)rItr->time);
+ ALOGV("lastVideoSampleTime:%lld", (long long)lastVideoSampleTime);
+ if (lastVideoIframe == false && (lastVideoSampleTime - rItr->time) >
+ 1000000 /* track interleaving duration in MPEG4Writer */) {
+ ALOGV("lastVideoIframe got chosen");
+ lastVideoIframe = true;
+ mLastSyncSampleTimeVect[videoTrackIndex] = rItr->time;
+ lastVideoIframeOffset = offset;
+ ALOGV("lastVideoIframeOffset:%lld", (long long)offset);
+ break;
+ }
+ }
+ }
+ if (lastVideoIframe == false) {
+ ALOGV("Need to rewrite all samples");
+ mLastSyncSampleTimeVect[videoTrackIndex] = 0;
+ lastVideoIframeOffset = 0;
+ }
+ unsigned int framesIgnoredCount = 0;
+ for (auto itr = mSDI.begin(); itr != mSDI.end();) {
+ int64_t offset = 0;
+ ALOGV("trackIndex:%zu, %" PRId64 "", itr->exTrackIndex, itr->time);
+ if (itr->meta->findInt64(kKeySampleFileOffset, &offset) &&
+ offset >= lastVideoIframeOffset) {
+ ALOGV("offset:%lld", (long long)offset);
+ if (!audioSyncSampleTimeSet && audioTrackIndex != -1 &&
+ audioTrackIndex == itr->exTrackIndex) {
+ mLastSyncSampleTimeVect[audioTrackIndex] = itr->time;
+ audioSyncSampleTimeSet = true;
+ }
+ itr = mSDI.erase(itr);
+ ++framesIgnoredCount;
+ } else {
+ ++itr;
+ }
+ }
+ ALOGV("framesIgnoredCount:%u", framesIgnoredCount);
+ }
+
+ if (mMode == APPEND_MODE_IGNORE_LAST_VIDEO_GOP && videoTrackIndex == -1 &&
+ audioTrackIndex != -1) {
+ ALOGV("Only AudioTrack is present");
+ for (auto rItr = mSDI.rbegin(); rItr != mSDI.rend(); ++rItr) {
+ int32_t val = 0;
+ if (rItr->meta->findInt32(kKeyIsSyncFrame, &val) && val != 0) {
+ mLastSyncSampleTimeVect[audioTrackIndex] = rItr->time;
+ break;
+ }
+ }
+ unsigned int framesIgnoredCount = 0;
+ for (auto itr = mSDI.begin(); itr != mSDI.end();) {
+ if (itr->time >= mLastSyncSampleTimeVect[audioTrackIndex]) {
+ itr = mSDI.erase(itr);
+ ++framesIgnoredCount;
+ } else {
+ ++itr;
+ }
+ }
+ ALOGV("framesIgnoredCount :%u", framesIgnoredCount);
+ }
+
+ for (size_t i = 0; i < mLastSyncSampleTimeVect.size(); ++i) {
+ ALOGV("mLastSyncSampleTimeVect[%zu]:%lld", i, (long long)mLastSyncSampleTimeVect[i]);
+ mFmtIndexMap[i]->setInt64(
+ "sample-time-before-append" /*AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND*/,
+ mLastSyncSampleTimeVect[i]);
+ }
+ for (size_t i = 0; i < mMaxTimestampVect.size(); ++i) {
+ ALOGV("mMaxTimestamp[%zu]:%lld", i, (long long)mMaxTimestampVect[i]);
+ }
+ for (size_t i = 0; i < mSampleCountVect.size(); ++i) {
+ ALOGV("SampleCountVect[%zu]:%zu", i, mSampleCountVect[i]);
+ }
+ mState = INITIALIZED;
+ return OK;
+}
+
+MediaAppender::~MediaAppender() {
+ ALOGV("MediaAppender::~MediaAppender");
+ mMuxer.clear();
+ mExtractor.clear();
+}
+
+status_t MediaAppender::start() {
+ std::scoped_lock lock(mMutex);
+ ALOGV("MediaAppender::start");
+ if (mState != INITIALIZED) {
+ ALOGE("MediaAppender::start() is called in invalid state %d", mState);
+ return INVALID_OPERATION;
+ }
+ mMuxer = new (std::nothrow) MediaMuxer(mFd, mFormat);
+ for (const auto& n : mFmtIndexMap) {
+ ssize_t muxIndex = mMuxer->addTrack(n.second);
+ if (muxIndex < 0) {
+ ALOGE("addTrack failed");
+ return UNKNOWN_ERROR;
+ }
+ mTrackIndexMap.emplace(n.first, muxIndex);
+ }
+ ALOGV("trackIndexmap size:%zu", mTrackIndexMap.size());
+
+ status_t status = mMuxer->start();
+ if (status != OK) {
+ ALOGE("muxer start failed:%d", status);
+ return status;
+ }
+
+ ALOGV("Sorting samples based on their offsets");
+ for (int i = 0; i < mSDI.size(); ++i) {
+ ALOGV("i:%d", i + 1);
+ /* TODO : Allocate a single allocation of the max size, and reuse it across ABuffers if
+ * using new ABuffer(void *, size_t).
+ */
+ sp<ABuffer> data = new (std::nothrow) ABuffer(mSDI[i].size);
+ if (data == nullptr) {
+ ALOGE("memory allocation failed");
+ return NO_MEMORY;
+ }
+ data->setRange(0, mSDI[i].size);
+ int32_t val = 0;
+ int sampleFlags = 0;
+ if (mSDI[i].meta->findInt32(kKeyIsSyncFrame, &val) && val != 0) {
+ sampleFlags |= MediaCodec::BUFFER_FLAG_SYNCFRAME;
+ }
+
+ int64_t val64;
+ if (mSDI[i].meta->findInt64(kKeySampleFileOffset, &val64)) {
+ ALOGV("SampleFileOffset Found :%zu:%lld:%lld", mSDI[i].exTrackIndex,
+ (long long)mSampleCountVect[mSDI[i].exTrackIndex], (long long)val64);
+ sp<AMessage> bufMeta = data->meta();
+ bufMeta->setInt64("sample-file-offset" /*AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND*/,
+ val64);
+ }
+ if (mSDI[i].meta->findInt64(kKeyLastSampleIndexInChunk, &val64)) {
+ ALOGV("kKeyLastSampleIndexInChunk Found %lld:%lld",
+ (long long)mSampleCountVect[mSDI[i].exTrackIndex], (long long)val64);
+ sp<AMessage> bufMeta = data->meta();
+ bufMeta->setInt64(
+ "last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+ val64);
+ }
+ status = mMuxer->writeSampleData(data, mTrackIndexMap[mSDI[i].exTrackIndex], mSDI[i].time,
+ sampleFlags);
+ if (status != OK) {
+ ALOGE("muxer writeSampleData failed:%d", status);
+ return status;
+ }
+ }
+ mState = STARTED;
+ return OK;
+}
+
+status_t MediaAppender::stop() {
+ std::scoped_lock lock(mMutex);
+ ALOGV("MediaAppender::stop");
+ if (mState == STARTED) {
+ status_t status = mMuxer->stop();
+ if (status != OK) {
+ mState = ERROR;
+ } else {
+ mState = STOPPED;
+ }
+ return status;
+ } else {
+ ALOGE("stop() is called in invalid state %d", mState);
+ return INVALID_OPERATION;
+ }
+}
+
+ssize_t MediaAppender::getTrackCount() {
+ std::scoped_lock lock(mMutex);
+ ALOGV("MediaAppender::getTrackCount");
+ if (mState != INITIALIZED && mState != STARTED) {
+ ALOGE("getTrackCount() is called in invalid state %d", mState);
+ return -1;
+ }
+ return mTrackCount;
+}
+
+sp<AMessage> MediaAppender::getTrackFormat(size_t idx) {
+ std::scoped_lock lock(mMutex);
+ ALOGV("MediaAppender::getTrackFormat");
+ if (mState != INITIALIZED && mState != STARTED) {
+ ALOGE("getTrackFormat() is called in invalid state %d", mState);
+ return nullptr;
+ }
+ if (idx >= mTrackCount) {
+ ALOGE("getTrackFormat() idx is out of range");
+ return nullptr;
+ }
+ return mFmtIndexMap[idx];
+}
+
+status_t MediaAppender::writeSampleData(const sp<ABuffer>& buffer, size_t trackIndex,
+ int64_t timeUs, uint32_t flags) {
+ std::scoped_lock lock(mMutex);
+ ALOGV("writeSampleData:trackIndex:%zu, time:%" PRId64 "", trackIndex, timeUs);
+ return mMuxer->writeSampleData(buffer, trackIndex, timeUs, flags);
+}
+
+status_t MediaAppender::setOrientationHint([[maybe_unused]] int degrees) {
+ ALOGE("setOrientationHint not supported. Has to be called prior to start on initial muxer");
+ return ERROR_UNSUPPORTED;
+}
+
+status_t MediaAppender::setLocation([[maybe_unused]] int latit, [[maybe_unused]] int longit) {
+ ALOGE("setLocation not supported. Has to be called prior to start on initial muxer");
+ return ERROR_UNSUPPORTED;
+}
+
+ssize_t MediaAppender::addTrack([[maybe_unused]] const sp<AMessage> &format) {
+ ALOGE("addTrack not supported");
+ return ERROR_UNSUPPORTED;
+}
+
+} // namespace android
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 26cdec8..50ebeef 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -109,6 +109,7 @@
static const char *kCodecLevel = "android.media.mediacodec.level"; /* 0..n */
static const char *kCodecBitrateMode = "android.media.mediacodec.bitrate_mode"; /* CQ/VBR/CBR */
static const char *kCodecBitrate = "android.media.mediacodec.bitrate"; /* 0..n */
+static const char *kCodecOriginalBitrate = "android.media.mediacodec.original.bitrate"; /* 0..n */
static const char *kCodecMaxWidth = "android.media.mediacodec.maxwidth"; /* 0..n */
static const char *kCodecMaxHeight = "android.media.mediacodec.maxheight"; /* 0..n */
static const char *kCodecError = "android.media.mediacodec.errcode";
@@ -139,6 +140,8 @@
static const char *kCodecRecentLatencyCount = "android.media.mediacodec.recent.n";
static const char *kCodecRecentLatencyHist = "android.media.mediacodec.recent.hist"; /* in us */
+static const char *kCodecShapingEnhanced = "android.media.mediacodec.shaped"; /* 0..n, number of fields changed by shaping */
+
// XXX suppress until we get our representation right
static bool kEmitHistogram = false;
@@ -1394,6 +1397,7 @@
* MediaFormat Shaping forward declarations
* including the property name we use for control.
*/
+static int enableMediaFormatShapingDefault = 1;
static const char enableMediaFormatShapingProperty[] = "debug.stagefright.enableshaping";
static void mapFormat(AString componentName, const sp<AMessage> &format, const char *kind,
bool reverse);
@@ -1469,7 +1473,8 @@
}
if (flags & CONFIGURE_FLAG_ENCODE) {
- int8_t enableShaping = property_get_bool(enableMediaFormatShapingProperty, 0);
+ int8_t enableShaping = property_get_bool(enableMediaFormatShapingProperty,
+ enableMediaFormatShapingDefault);
if (!enableShaping) {
ALOGI("format shaping disabled, property '%s'", enableMediaFormatShapingProperty);
} else {
@@ -1556,18 +1561,7 @@
static bool connectFormatShaper() {
static std::once_flag sCheckOnce;
-#if 0
- // an early return if the property says disabled means we skip loading.
- // that saves memory.
-
- // apply framework level modifications to the mediaformat for encoding
- // XXX: default off for a while during dogfooding
- int8_t enableShaping = property_get_bool(enableMediaFormatShapingProperty, 0);
-
- if (!enableShaping) {
- return true;
- }
-#endif
+ ALOGV("connectFormatShaper...");
std::call_once(sCheckOnce, [&](){
@@ -1672,6 +1666,8 @@
//
static const char *featurePrefix = "feature-";
static const int featurePrefixLen = strlen(featurePrefix);
+ static const char *tuningPrefix = "tuning-";
+ static const int tuningPrefixLen = strlen(tuningPrefix);
static const char *mappingPrefix = "mapping-";
static const int mappingPrefixLen = strlen(mappingPrefix);
@@ -1685,6 +1681,14 @@
intValue);
}
continue;
+ } else if (!strncmp(mapSrc, tuningPrefix, tuningPrefixLen)) {
+ AString value;
+ if (details->findString(mapSrc, &value)) {
+ ALOGV("-- tuning '%s' -> '%s'", mapSrc, value.c_str());
+ (void)(sShaperOps->setTuning)(shaperHandle, &mapSrc[tuningPrefixLen],
+ value.c_str());
+ }
+ continue;
} else if (!strncmp(mapSrc, mappingPrefix, mappingPrefixLen)) {
AString target;
if (details->findString(mapSrc, &target)) {
@@ -1801,10 +1805,20 @@
AMediaFormat_getFormat(updatedNdkFormat, &updatedFormat);
sp<AMessage> deltas = updatedFormat->changesFrom(format, false /* deep */);
- ALOGD("shapeMediaFormat: deltas: %s", deltas->debugString(2).c_str());
-
- // note that this means that for anything in both, the copy in deltas wins
- format->extend(deltas);
+ size_t changeCount = deltas->countEntries();
+ ALOGD("shapeMediaFormat: deltas(%zu): %s", changeCount, deltas->debugString(2).c_str());
+ if (changeCount > 0) {
+ if (mMetricsHandle != 0) {
+ mediametrics_setInt32(mMetricsHandle, kCodecShapingEnhanced, changeCount);
+ // save some old properties before we fold in the new ones
+ int32_t bitrate;
+ if (format->findInt32(KEY_BIT_RATE, &bitrate)) {
+ mediametrics_setInt32(mMetricsHandle, kCodecOriginalBitrate, bitrate);
+ }
+ }
+ // NB: for any field in both format and deltas, the deltas copy wins
+ format->extend(deltas);
+ }
}
AMediaFormat_delete(updatedNdkFormat);
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 876d06c..0107c32 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -169,9 +169,7 @@
}
status_t MediaCodecSource::Puller::setStopTimeUs(int64_t stopTimeUs) {
- sp<AMessage> msg = new AMessage(kWhatSetStopTimeUs, this);
- msg->setInt64("stop-time-us", stopTimeUs);
- return postSynchronouslyAndReturnError(msg);
+ return mSource->setStopTimeUs(stopTimeUs);
}
status_t MediaCodecSource::Puller::start(const sp<MetaData> &meta, const sp<AMessage> ¬ify) {
@@ -189,19 +187,11 @@
}
void MediaCodecSource::Puller::stop() {
- bool interrupt = false;
- {
- // mark stopping before actually reaching kWhatStop on the looper, so the pulling will
- // stop.
- Mutexed<Queue>::Locked queue(mQueue);
- queue->mPulling = false;
- interrupt = queue->mReadPendingSince && (queue->mReadPendingSince < ALooper::GetNowUs() - 1000000);
- queue->flush(); // flush any unprocessed pulled buffers
- }
-
- if (interrupt) {
- interruptSource();
- }
+ // mark stopping before actually reaching kWhatStop on the looper, so the pulling will
+ // stop.
+ Mutexed<Queue>::Locked queue(mQueue);
+ queue->mPulling = false;
+ queue->flush(); // flush any unprocessed pulled buffers
}
void MediaCodecSource::Puller::interruptSource() {
@@ -685,9 +675,9 @@
if (mStopping && reachedEOS) {
ALOGI("encoder (%s) stopped", mIsVideo ? "video" : "audio");
if (mPuller != NULL) {
- mPuller->stopSource();
+ mPuller->interruptSource();
}
- ALOGV("source (%s) stopped", mIsVideo ? "video" : "audio");
+ ALOGI("source (%s) stopped", mIsVideo ? "video" : "audio");
// posting reply to everyone that's waiting
List<sp<AReplyToken>>::iterator it;
for (it = mStopReplyIDQueue.begin();
@@ -715,6 +705,9 @@
status_t MediaCodecSource::feedEncoderInputBuffers() {
MediaBufferBase* mbuf = NULL;
while (!mAvailEncoderInputIndices.empty() && mPuller->readBuffer(&mbuf)) {
+ if (!mEncoder) {
+ return BAD_VALUE;
+ }
size_t bufferIndex = *mAvailEncoderInputIndices.begin();
mAvailEncoderInputIndices.erase(mAvailEncoderInputIndices.begin());
@@ -893,7 +886,7 @@
{
int32_t eos = 0;
if (msg->findInt32("eos", &eos) && eos) {
- ALOGV("puller (%s) reached EOS", mIsVideo ? "video" : "audio");
+ ALOGI("puller (%s) reached EOS", mIsVideo ? "video" : "audio");
signalEOS();
break;
}
@@ -1111,12 +1104,7 @@
if (generation != mGeneration) {
break;
}
-
- if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
- ALOGV("source (%s) stopping", mIsVideo ? "video" : "audio");
- mPuller->interruptSource();
- ALOGV("source (%s) stopped", mIsVideo ? "video" : "audio");
- }
+ ALOGD("source (%s) stopping stalled", mIsVideo ? "video" : "audio");
signalEOS();
break;
}
@@ -1148,7 +1136,7 @@
if (mFlags & FLAG_USE_SURFACE_INPUT) {
sp<AMessage> params = new AMessage;
params->setInt64(PARAMETER_KEY_OFFSET_TIME, mInputBufferTimeOffsetUs);
- err = mEncoder->setParameters(params);
+ err = mEncoder ? mEncoder->setParameters(params) : BAD_VALUE;
}
sp<AMessage> response = new AMessage;
@@ -1168,7 +1156,7 @@
if (mFlags & FLAG_USE_SURFACE_INPUT) {
sp<AMessage> params = new AMessage;
params->setInt64("stop-time-us", stopTimeUs);
- err = mEncoder->setParameters(params);
+ err = mEncoder ? mEncoder->setParameters(params) : BAD_VALUE;
} else {
err = mPuller->setStopTimeUs(stopTimeUs);
}
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index c91386d..a946f71 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -76,6 +76,7 @@
mFileMeta.clear();
mWriter.clear();
mTrackList.clear();
+ mFormatList.clear();
}
ssize_t MediaMuxer::addTrack(const sp<AMessage> &format) {
@@ -109,6 +110,8 @@
ALOGW("addTrack() setCaptureRate failed :%d", result);
}
}
+
+ mFormatList.add(format);
return mTrackList.add(newTrack);
}
@@ -224,9 +227,42 @@
ALOGV("BUFFER_FLAG_EOS");
}
+ sp<AMessage> bufMeta = buffer->meta();
+ int64_t val64;
+ if (bufMeta->findInt64("sample-file-offset", &val64)) {
+ sampleMetaData.setInt64(kKeySampleFileOffset, val64);
+ }
+ if (bufMeta->findInt64(
+ "last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+ &val64)) {
+ sampleMetaData.setInt64(kKeyLastSampleIndexInChunk, val64);
+ }
+
sp<MediaAdapter> currentTrack = mTrackList[trackIndex];
// This pushBuffer will wait until the mediaBuffer is consumed.
return currentTrack->pushBuffer(mediaBuffer);
}
+ssize_t MediaMuxer::getTrackCount() {
+ Mutex::Autolock autoLock(mMuxerLock);
+ if (mState != INITIALIZED && mState != STARTED) {
+ ALOGE("getTrackCount() must be called either in INITIALIZED or STARTED state");
+ return -1;
+ }
+ return mTrackList.size();
+}
+
+sp<AMessage> MediaMuxer::getTrackFormat(size_t idx) {
+ Mutex::Autolock autoLock(mMuxerLock);
+ if (mState != INITIALIZED && mState != STARTED) {
+ ALOGE("getTrackFormat() must be called either in INITIALIZED or STARTED state");
+ return nullptr;
+ }
+ if (idx >= mFormatList.size()) {
+ ALOGE("getTrackFormat() idx is out of range");
+ return nullptr;
+ }
+ return mFormatList[idx];
+}
+
} // namespace android
diff --git a/media/libstagefright/MediaTrack.cpp b/media/libstagefright/MediaTrack.cpp
index 24ba38a..2447f5e 100644
--- a/media/libstagefright/MediaTrack.cpp
+++ b/media/libstagefright/MediaTrack.cpp
@@ -133,6 +133,14 @@
if (format->mFormat->findInt64("target-time", &val64)) {
meta.setInt64(kKeyTargetTime, val64);
}
+ if (format->mFormat->findInt64("sample-file-offset", &val64)) {
+ meta.setInt64(kKeySampleFileOffset, val64);
+ }
+ if (format->mFormat->findInt64(
+ "last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+ &val64)) {
+ meta.setInt64(kKeyLastSampleIndexInChunk, val64);
+ }
int32_t val32;
if (format->mFormat->findInt32("is-sync-frame", &val32)) {
meta.setInt32(kKeyIsSyncFrame, val32);
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index f2c7dd6..f0383b5 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -189,6 +189,11 @@
return err;
}
+const char* NuMediaExtractor::getName() const {
+ Mutex::Autolock autoLock(mLock);
+ return mImpl == nullptr ? nullptr : mImpl->name().string();
+}
+
static String8 arrayToString(const std::vector<uint8_t> &array) {
String8 result;
for (size_t i = 0; i < array.size(); i++) {
diff --git a/media/libstagefright/OWNERS b/media/libstagefright/OWNERS
index 819389d..0cc2294 100644
--- a/media/libstagefright/OWNERS
+++ b/media/libstagefright/OWNERS
@@ -4,4 +4,8 @@
lajos@google.com
marcone@google.com
taklee@google.com
-wonsik@google.com
\ No newline at end of file
+wonsik@google.com
+
+# LON
+olly@google.com
+andrewlewis@google.com
diff --git a/media/libstagefright/TEST_MAPPING b/media/libstagefright/TEST_MAPPING
index dff7b22..7ce2968 100644
--- a/media/libstagefright/TEST_MAPPING
+++ b/media/libstagefright/TEST_MAPPING
@@ -34,6 +34,9 @@
"presubmit": [
{
"name": "mediacodecTest"
+ },
+ {
+ "name": "CtsMediaTranscodingTestCases"
}
],
"postsubmit": [
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 5ede871..04a9b17 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -725,16 +725,19 @@
}
};
-static std::vector<std::pair<const char *, uint32_t>> int64Mappings {
+static std::vector<std::pair<const char*, uint32_t>> int64Mappings {
{
- { "exif-offset", kKeyExifOffset },
- { "exif-size", kKeyExifSize },
- { "xmp-offset", kKeyXmpOffset },
- { "xmp-size", kKeyXmpSize },
- { "target-time", kKeyTargetTime },
- { "thumbnail-time", kKeyThumbnailTime },
- { "timeUs", kKeyTime },
- { "durationUs", kKeyDuration },
+ { "exif-offset", kKeyExifOffset},
+ { "exif-size", kKeyExifSize},
+ { "xmp-offset", kKeyXmpOffset},
+ { "xmp-size", kKeyXmpSize},
+ { "target-time", kKeyTargetTime},
+ { "thumbnail-time", kKeyThumbnailTime},
+ { "timeUs", kKeyTime},
+ { "durationUs", kKeyDuration},
+ { "sample-file-offset", kKeySampleFileOffset},
+ { "last-sample-index-in-chunk", kKeyLastSampleIndexInChunk},
+ { "sample-time-before-append", kKeySampleTimeBeforeAppend},
}
};
diff --git a/media/libstagefright/foundation/OpusHeader.cpp b/media/libstagefright/foundation/OpusHeader.cpp
index 784e802..30d0ae6 100644
--- a/media/libstagefright/foundation/OpusHeader.cpp
+++ b/media/libstagefright/foundation/OpusHeader.cpp
@@ -146,6 +146,10 @@
int WriteOpusHeader(const OpusHeader &header, int input_sample_rate,
uint8_t* output, size_t output_size) {
// See https://wiki.xiph.org/OggOpus#ID_Header.
+ if (header.channels < 1 || header.channels > kMaxChannels) {
+ ALOGE("Invalid channel count: %d", header.channels);
+ return -1;
+ }
const size_t total_size = kOpusHeaderStreamMapOffset + header.channels;
if (output_size < total_size) {
ALOGE("Output buffer too small for header.");
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index 2582ed0..7f2728e 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -106,6 +106,7 @@
off64_t mOffset;
off64_t mPreAllocateFileEndOffset; //End of file offset during preallocation.
off64_t mMdatOffset;
+ off64_t mMaxOffsetAppend; // File offset written up to while appending.
off64_t mMdatEndOffset; // End offset of mdat atom.
uint8_t *mInMemoryCache;
off64_t mInMemoryCacheOffset;
diff --git a/media/libstagefright/include/media/stagefright/MediaAppender.h b/media/libstagefright/include/media/stagefright/MediaAppender.h
new file mode 100644
index 0000000..c2f6f10
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MediaAppender.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_APPENDER_H
+#define ANDROID_MEDIA_APPENDER_H
+
+#include <media/stagefright/MediaMuxer.h>
+#include <media/stagefright/NuMediaExtractor.h>
+#include <stack>
+
+namespace android {
+
+struct MediaAppender : public MediaMuxerBase {
+public:
+ enum AppendMode {
+ APPEND_MODE_FIRST = 0,
+ APPEND_MODE_IGNORE_LAST_VIDEO_GOP = APPEND_MODE_FIRST,
+ APPEND_MODE_ADD_TO_EXISTING_DATA = 1,
+ APPEND_MODE_LAST = APPEND_MODE_ADD_TO_EXISTING_DATA,
+ };
+
+ static sp<MediaAppender> create(int fd, AppendMode mode);
+
+ virtual ~MediaAppender();
+
+ status_t init();
+
+ status_t start();
+
+ status_t stop();
+
+ status_t writeSampleData(const sp<ABuffer>& buffer, size_t trackIndex, int64_t timeUs,
+ uint32_t flags);
+
+ status_t setOrientationHint(int degrees);
+
+ status_t setLocation(int latitude, int longitude);
+
+ ssize_t addTrack(const sp<AMessage> &format);
+
+ ssize_t getTrackCount();
+
+ sp<AMessage> getTrackFormat(size_t idx);
+
+private:
+ MediaAppender(int fd, AppendMode mode);
+
+ int mFd;
+ MediaMuxer::OutputFormat mFormat;
+ AppendMode mMode;
+ sp<NuMediaExtractor> mExtractor;
+ sp<MediaMuxer> mMuxer;
+ size_t mTrackCount;
+ // Maps each extractor track index to the track index returned by the muxer.
+ std::map<size_t, ssize_t> mTrackIndexMap;
+ // Count of the samples in each track, indexed by extractor track ids.
+ std::vector<size_t> mSampleCountVect;
+ // Extractor track index of samples.
+ std::vector<size_t> mSampleIndexVect;
+ // Track format indexed by extractor track ids.
+ std::map<size_t, sp<AMessage>> mFmtIndexMap;
+ // Size of samples.
+ std::vector<size_t> mSampleSizeVect;
+ // Presentation time stamp of samples.
+ std::vector<int64_t> mSampleTimeVect;
+ // Timestamp of last sample of tracks.
+ std::vector<int64_t> mMaxTimestampVect;
+ // Metadata of samples.
+ std::vector<sp<MetaData>> mSampleMetaVect;
+ std::mutex mMutex;
+ // Timestamp of the last sync sample of tracks.
+ std::vector<int64_t> mLastSyncSampleTimeVect;
+
+ struct sampleDataInfo;
+ std::vector<sampleDataInfo> mSDI;
+
+ enum : int {
+ UNINITIALIZED,
+ INITIALIZED,
+ STARTED,
+ STOPPED,
+ ERROR,
+ } mState GUARDED_BY(mMutex);
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_APPENDER_H
\ No newline at end of file
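For orientation, here is a minimal, hypothetical sketch of driving the MediaAppender interface declared above (create -> start -> writeSampleData -> stop). It is not part of this patch; it assumes create() returns an appender already in the INITIALIZED state expected by start(), that the caller keeps ownership of the fd (as the NDK append API documents), and that trackIndex/timeUs describe an already-encoded sample for one of the existing tracks.

```
#include <fcntl.h>
#include <unistd.h>

#include <media/stagefright/MediaAppender.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <utils/Errors.h>
#include <utils/StrongPointer.h>

using namespace android;

// Hypothetical helper: append one already-encoded sync sample to an existing MPEG4 file.
status_t appendOneSample(const char* path, const sp<ABuffer>& encodedSample,
                         size_t trackIndex, int64_t timeUs) {
    int fd = open(path, O_RDWR);  // the caller remains responsible for closing this fd
    if (fd < 0) {
        return UNKNOWN_ERROR;
    }
    sp<MediaAppender> appender =
            MediaAppender::create(fd, MediaAppender::APPEND_MODE_IGNORE_LAST_VIDEO_GOP);
    status_t err = (appender != nullptr) ? OK : UNKNOWN_ERROR;
    if (err == OK) {
        err = appender->start();  // re-muxes the retained samples before new ones are written
    }
    if (err == OK) {
        err = appender->writeSampleData(encodedSample, trackIndex, timeUs,
                                        MediaCodec::BUFFER_FLAG_SYNCFRAME);
    }
    if (err == OK) {
        err = appender->stop();
    }
    close(fd);
    return err;
}
```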
diff --git a/media/libstagefright/include/media/stagefright/MediaMuxer.h b/media/libstagefright/include/media/stagefright/MediaMuxer.h
index a1b9465..e97a65e 100644
--- a/media/libstagefright/include/media/stagefright/MediaMuxer.h
+++ b/media/libstagefright/include/media/stagefright/MediaMuxer.h
@@ -22,7 +22,12 @@
#include <utils/Vector.h>
#include <utils/threads.h>
+#include <map>
+#include <mutex>
+#include <vector>
+
#include "media/stagefright/foundation/ABase.h"
+#include "MediaMuxerBase.h"
namespace android {
@@ -33,6 +38,7 @@
struct MediaSource;
class MetaData;
struct MediaWriter;
+struct NuMediaExtractor;
// MediaMuxer is used to mux multiple tracks into a video. Currently, we only
// support a mp4 file as the output.
@@ -40,19 +46,8 @@
// Constructor -> addTrack+ -> start -> writeSampleData+ -> stop
// If muxing operation need to be cancelled, the app is responsible for
// deleting the output file after stop.
-struct MediaMuxer : public RefBase {
+struct MediaMuxer : public MediaMuxerBase {
public:
- // Please update media/java/android/media/MediaMuxer.java if the
- // OutputFormat is updated.
- enum OutputFormat {
- OUTPUT_FORMAT_MPEG_4 = 0,
- OUTPUT_FORMAT_WEBM = 1,
- OUTPUT_FORMAT_THREE_GPP = 2,
- OUTPUT_FORMAT_HEIF = 3,
- OUTPUT_FORMAT_OGG = 4,
- OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
- };
-
// Construct the muxer with the file descriptor. Note that the MediaMuxer
// will close this file at stop().
MediaMuxer(int fd, OutputFormat format);
@@ -117,10 +112,25 @@
status_t writeSampleData(const sp<ABuffer> &buffer, size_t trackIndex,
int64_t timeUs, uint32_t flags) ;
+ /**
+ * Gets the number of tracks added successfully. Should be called in the
+ * INITIALIZED (after constructor) or STARTED (after start()) state.
+ * @return the number of tracks, or -1 if called in the wrong state.
+ */
+ ssize_t getTrackCount();
+
+ /**
+ * Gets the format of the track at the given index.
+ * @param idx index of the track whose format is requested.
+ * @return smart pointer to the AMessage containing the format details.
+ */
+ sp<AMessage> getTrackFormat(size_t idx);
+
private:
const OutputFormat mFormat;
sp<MediaWriter> mWriter;
Vector< sp<MediaAdapter> > mTrackList; // Each track has its MediaAdapter.
+ Vector< sp<AMessage> > mFormatList; // Format of each track.
sp<MetaData> mFileMeta; // Metadata for the whole file.
Mutex mMuxerLock;
diff --git a/media/libstagefright/include/media/stagefright/MediaMuxerBase.h b/media/libstagefright/include/media/stagefright/MediaMuxerBase.h
new file mode 100644
index 0000000..f02d510
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MediaMuxerBase.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_MUXER_BASE_H_
+#define MEDIA_MUXER_BASE_H_
+
+#include <utils/RefBase.h>
+#include "media/stagefright/foundation/ABase.h"
+
+namespace android {
+
+struct ABuffer;
+struct AMessage;
+
+// MediaMuxer is used to mux multiple tracks into a video. Currently, we only
+// support an mp4 file as the output.
+// The expected calling order of the functions is:
+// Constructor -> addTrack+ -> start -> writeSampleData+ -> stop
+// If the muxing operation needs to be cancelled, the app is responsible for
+// deleting the output file after stop.
+struct MediaMuxerBase : public RefBase {
+public:
+ // Please update media/java/android/media/MediaMuxer.java if the
+ // OutputFormat is updated.
+ enum OutputFormat {
+ OUTPUT_FORMAT_MPEG_4 = 0,
+ OUTPUT_FORMAT_WEBM = 1,
+ OUTPUT_FORMAT_THREE_GPP = 2,
+ OUTPUT_FORMAT_HEIF = 3,
+ OUTPUT_FORMAT_OGG = 4,
+ OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
+ };
+
+ // Default constructor. Concrete muxers are constructed with a file descriptor
+ // and will close it at stop().
+ MediaMuxerBase() {};
+
+ virtual ~MediaMuxerBase() {};
+
+ /**
+ * Add a track with its format information. This should be
+ * called before start().
+ * @param format the track's format.
+ * @return the track's index or negative number if error.
+ */
+ virtual ssize_t addTrack(const sp<AMessage> &format) = 0;
+
+ /**
+ * Start muxing. Make sure all the tracks have been added before
+ * calling this.
+ */
+ virtual status_t start() = 0;
+
+ /**
+ * Set the orientation hint.
+ * @param degrees The rotation degrees. It has to be either 0,
+ * 90, 180 or 270.
+ * @return OK if no error.
+ */
+ virtual status_t setOrientationHint(int degrees) = 0;
+
+ /**
+ * Set the location.
+ * @param latitude The latitude in degree x 1000. Its value must be in the range
+ * [-900000, 900000].
+ * @param longitude The longitude in degree x 1000. Its value must be in the range
+ * [-1800000, 1800000].
+ * @return OK if no error.
+ */
+ virtual status_t setLocation(int latitude, int longitude) = 0;
+
+ /**
+ * Stop muxing.
+ * This method is a blocking call. Depending on how much data is buffered
+ * internally, stopping the muxer may take a significant amount of time.
+ * Launching this call from the UI thread is not recommended.
+ * @return OK if no error.
+ */
+ virtual status_t stop() = 0;
+
+ /**
+ * Send a sample buffer for muxing.
+ * The buffer can be reused once this method returns. Typically,
+ * this function won't block for very long, and thus there
+ * is no need to call this method from a separate thread to
+ * push a buffer.
+ * @param buffer the incoming sample buffer.
+ * @param trackIndex the buffer's track index number.
+ * @param timeUs the buffer's time stamp.
+ * @param flags the only supported flag for now is
+ * MediaCodec::BUFFER_FLAG_SYNCFRAME.
+ * @return OK if no error.
+ */
+ virtual status_t writeSampleData(const sp<ABuffer> &buffer, size_t trackIndex,
+ int64_t timeUs, uint32_t flags) = 0;
+
+ /**
+ * Gets the number of tracks added successfully. Should be called in the
+ * INITIALIZED (after constructor) or STARTED (after start()) state.
+ * @return the number of tracks, or -1 if called in the wrong state.
+ */
+ virtual ssize_t getTrackCount() = 0;
+
+ /**
+ * Gets the format of the track at the given index.
+ * @param idx index of the track whose format is requested.
+ * @return smart pointer to the AMessage containing the format details.
+ */
+ virtual sp<AMessage> getTrackFormat(size_t idx) = 0;
+
+private:
+
+ DISALLOW_EVIL_CONSTRUCTORS(MediaMuxerBase);
+};
+
+} // namespace android
+
+#endif // MEDIA_MUXER_BASE_H_
+
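To make the documented calling order concrete (Constructor -> addTrack+ -> start -> writeSampleData+ -> stop), here is a minimal sketch against the MediaMuxer implementation of this interface. It is illustrative only: the format keys ("mime", "width", "height") and the pre-encoded sample are assumptions, and a real AVC track format would also carry the codec-specific data ("csd-0"/"csd-1") produced by the encoder.

```
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaMuxer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>
#include <utils/Errors.h>

using namespace android;

// Illustrative only: mux a single pre-encoded video sync sample into an mp4 file.
status_t muxOneSample(int fd, const sp<ABuffer>& encodedFrame, int64_t timeUs) {
    sp<MediaMuxer> muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_MPEG_4);

    // In practice this AMessage comes from the encoder's output format and also
    // carries "csd-0"/"csd-1"; the values below are placeholders.
    sp<AMessage> format = new AMessage;
    format->setString("mime", "video/avc");
    format->setInt32("width", 1280);
    format->setInt32("height", 720);

    ssize_t trackIndex = muxer->addTrack(format);
    if (trackIndex < 0) {
        return UNKNOWN_ERROR;
    }
    status_t err = muxer->start();
    if (err == OK) {
        err = muxer->writeSampleData(encodedFrame, trackIndex, timeUs,
                                     MediaCodec::BUFFER_FLAG_SYNCFRAME);
    }
    if (err == OK) {
        err = muxer->stop();  // per the class documentation, the muxer closes the fd here
    }
    return err;
}
```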
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index 940bd86..408872f 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -264,6 +264,11 @@
// Slow-motion markers
kKeySlowMotionMarkers = 'slmo', // raw data, byte array following spec for
// MediaFormat#KEY_SLOW_MOTION_MARKERS
+
+ kKeySampleFileOffset = 'sfof', // int64_t, sample's offset in a media file.
+ kKeyLastSampleIndexInChunk = 'lsic', // int64_t, index of the last sample in a chunk.
+ kKeySampleTimeBeforeAppend = 'lsba', // int64_t, timestamp of the last sample of a track.
+
};
enum {
diff --git a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
index d8f2b00..6aa7c0f 100644
--- a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
@@ -100,6 +100,10 @@
status_t getAudioPresentations(size_t trackIdx, AudioPresentationCollection *presentations);
+ status_t setPlaybackId(const String8& playbackId);
+
+ const char* getName() const;
+
protected:
virtual ~NuMediaExtractor();
diff --git a/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp b/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
index 5a9760d..67c6102 100644
--- a/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
+++ b/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
@@ -146,7 +146,10 @@
};
static std::vector<std::string> names = {
prefixes[0] + variants[0] + ".xml",
- prefixes[1] + variants[1] + ".xml"
+ prefixes[1] + variants[1] + ".xml",
+
+ // shaping information is not currently variant specific.
+ "media_codecs_shaping.xml"
};
return names;
}
@@ -347,6 +350,7 @@
status_t addFeature(const char **attrs);
status_t addLimit(const char **attrs);
status_t addMapping(const char **attrs);
+ status_t addTuning(const char **attrs);
status_t addQuirk(const char **attrs, const char *prefix = nullptr);
status_t addSetting(const char **attrs, const char *prefix = nullptr);
status_t enterMediaCodec(const char **attrs, bool encoder);
@@ -429,7 +433,7 @@
if (findFileInDirs(searchDirs, fileName, &path)) {
err = parseXmlPath(path);
} else {
- ALOGD("Cannot find %s in search path", fileName.c_str());
+ ALOGI("Did not find %s in search path", fileName.c_str());
}
res = combineStatus(res, err);
}
@@ -439,7 +443,7 @@
status_t MediaCodecsXmlParser::Impl::parseXmlPath(const std::string &path) {
std::lock_guard<std::mutex> guard(mLock);
if (!fileExists(path)) {
- ALOGD("Cannot find %s", path.c_str());
+ ALOGV("Cannot find %s", path.c_str());
mParsingStatus = combineStatus(mParsingStatus, NAME_NOT_FOUND);
return NAME_NOT_FOUND;
}
@@ -743,7 +747,8 @@
// ignore limits and features specified outside of type
if (!mState->inType()
&& (strEq(name, "Limit") || strEq(name, "Feature")
- || strEq(name, "Variant") || strEq(name, "Mapping"))) {
+ || strEq(name, "Variant") || strEq(name, "Mapping")
+ || strEq(name, "Tuning"))) {
PLOGD("ignoring %s specified outside of a Type", name);
return;
} else if (strEq(name, "Limit")) {
@@ -752,6 +757,8 @@
err = addFeature(attrs);
} else if (strEq(name, "Mapping")) {
err = addMapping(attrs);
+ } else if (strEq(name, "Tuning")) {
+ err = addTuning(attrs);
} else if (strEq(name, "Variant") && section != SECTION_VARIANT) {
err = limitVariants(attrs);
mState->enterSection(err == OK ? SECTION_VARIANT : SECTION_UNKNOWN);
@@ -1445,6 +1452,45 @@
return OK;
}
+status_t MediaCodecsXmlParser::Impl::Parser::addTuning(const char **attrs) {
+ CHECK(mState->inType());
+ size_t i = 0;
+ const char *a_name = nullptr;
+ const char *a_value = nullptr;
+
+ while (attrs[i] != nullptr) {
+ CHECK((i & 1) == 0);
+ if (attrs[i + 1] == nullptr) {
+ PLOGD("Mapping: attribute '%s' is null", attrs[i]);
+ return BAD_VALUE;
+ }
+
+ if (strEq(attrs[i], "name")) {
+ a_name = attrs[++i];
+ } else if (strEq(attrs[i], "value")) {
+ a_value = attrs[++i];
+ } else {
+ PLOGD("Tuning: ignoring unrecognized attribute '%s'", attrs[i]);
+ ++i;
+ }
+ ++i;
+ }
+
+ // Every tuning must have both fields
+ if (a_name == nullptr) {
+ PLOGD("Tuning with no 'name' attribute");
+ return BAD_VALUE;
+ }
+
+ if (a_value == nullptr) {
+ PLOGD("Tuning with no 'value' attribute");
+ return BAD_VALUE;
+ }
+
+ mState->addDetail(std::string("tuning-") + a_name, a_value);
+ return OK;
+}
+
status_t MediaCodecsXmlParser::Impl::Parser::addAlias(const char **attrs) {
CHECK(mState->inCodec());
size_t i = 0;
diff --git a/media/libstagefright/xmlparser/api/current.txt b/media/libstagefright/xmlparser/api/current.txt
index 6f55dc0..ecfd85e 100644
--- a/media/libstagefright/xmlparser/api/current.txt
+++ b/media/libstagefright/xmlparser/api/current.txt
@@ -87,6 +87,7 @@
method public String getName();
method public java.util.List<media.codecs.Quirk> getQuirk_optional();
method public String getRank();
+ method public java.util.List<media.codecs.Tuning> getTuning_optional();
method public String getType();
method public java.util.List<media.codecs.Type> getType_optional();
method public String getUpdate();
@@ -136,6 +137,14 @@
method public java.util.List<media.codecs.Setting> getVariant_optional();
}
+ public class Tuning {
+ ctor public Tuning();
+ method public String getName();
+ method public String getValue();
+ method public void setName(String);
+ method public void setValue(String);
+ }
+
public class Type {
ctor public Type();
method public java.util.List<media.codecs.Alias> getAlias();
diff --git a/media/libstagefright/xmlparser/media_codecs.xsd b/media/libstagefright/xmlparser/media_codecs.xsd
index 30974f6..c9a7efc 100644
--- a/media/libstagefright/xmlparser/media_codecs.xsd
+++ b/media/libstagefright/xmlparser/media_codecs.xsd
@@ -64,6 +64,7 @@
<xs:element name="Limit" type="Limit" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="Feature" type="Feature" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="Mapping" type="Mapping" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="Tuning" type="Tuning" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="Variant" type="Variant" minOccurs="0" maxOccurs="unbounded"/>
</xs:choice>
<xs:attribute name="name" type="xs:string"/>
@@ -128,6 +129,10 @@
<xs:attribute name="kind" type="xs:string"/>
<xs:attribute name="value" type="xs:string"/>
</xs:complexType>
+ <xs:complexType name="Tuning">
+ <xs:attribute name="name" type="xs:string"/>
+ <xs:attribute name="value" type="xs:string"/>
+ </xs:complexType>
<xs:complexType name="Include">
<xs:attribute name="href" type="xs:string"/>
</xs:complexType>
diff --git a/media/libstagefright/xmlparser/test/XMLParserTest.cpp b/media/libstagefright/xmlparser/test/XMLParserTest.cpp
index c411c8d..7629d97 100644
--- a/media/libstagefright/xmlparser/test/XMLParserTest.cpp
+++ b/media/libstagefright/xmlparser/test/XMLParserTest.cpp
@@ -138,6 +138,12 @@
pair<string, string>("mapping-fire-from", "to"),
},
{}, "");
+ setCodecProperties("test11.encoder", true, 11, {}, {}, {}, "video/av01",
+ {
+ pair<string, string>("tuning-hungry", "yes"),
+ pair<string, string>("tuning-pi", "3.1415"),
+ },
+ {}, "");
setRoleProperties("audio_decoder.mp3", false, 1, "audio/mpeg", "test1.decoder",
{pair<string, string>("attribute::disabled", "present"),
@@ -180,6 +186,11 @@
setRoleProperties("video_encoder.hevc", true, 10, "video/hevc", "test10.encoder",
{ pair<string, string>("mapping-fire-from", "to")});
+ setRoleProperties("video_encoder.av01", true, 11, "video/av01", "test11.encoder",
+ {pair<string, string>("tuning-hungry", "yes"),
+ pair<string, string>("tuning-pi", "3.1415")
+ });
+
setServiceAttribute(
{pair<string, string>("domain-telephony", "0"), pair<string, string>("domain-tv", "0"),
pair<string, string>("setting2", "0"), pair<string, string>("variant-variant1", "0")});
diff --git a/media/libstagefright/xmlparser/test/testdata/media_codecs_unit_test.xml b/media/libstagefright/xmlparser/test/testdata/media_codecs_unit_test.xml
index c8913e5..8cae423 100644
--- a/media/libstagefright/xmlparser/test/testdata/media_codecs_unit_test.xml
+++ b/media/libstagefright/xmlparser/test/testdata/media_codecs_unit_test.xml
@@ -83,5 +83,10 @@
<MediaCodec name="test10.encoder" type="video/hevc" >
<Mapping kind="fire" name="from" value="to"/>
</MediaCodec>
+ <!-- entry for testing Tuning -->
+ <MediaCodec name="test11.encoder" type="video/av01" >
+ <Tuning name="hungry" value="yes"/>
+ <Tuning name="pi" value="3.1415"/>
+ </MediaCodec>
</Encoders>
</Included>
diff --git a/media/ndk/NdkImagePriv.h b/media/ndk/NdkImagePriv.h
index b019448..05115b9 100644
--- a/media/ndk/NdkImagePriv.h
+++ b/media/ndk/NdkImagePriv.h
@@ -40,6 +40,14 @@
*/
AIMAGE_FORMAT_RAW_DEPTH = 0x1002,
+
+ /**
+ * Device specific 10 bits depth RAW image format.
+ *
+ * <p>Unprocessed implementation-dependent raw depth measurements, opaque with 10 bit samples
+ * and device specific bit layout.</p>
+ */
+ AIMAGE_FORMAT_RAW_DEPTH10 = 0x1003,
};
// TODO: this only supports ImageReader
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index b75901a..1067e24 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -72,6 +72,7 @@
case AIMAGE_FORMAT_Y8:
case AIMAGE_FORMAT_HEIC:
case AIMAGE_FORMAT_DEPTH_JPEG:
+ case AIMAGE_FORMAT_RAW_DEPTH10:
return true;
case AIMAGE_FORMAT_PRIVATE:
// For private format, cpu usage is prohibited.
@@ -102,6 +103,7 @@
case AIMAGE_FORMAT_Y8:
case AIMAGE_FORMAT_HEIC:
case AIMAGE_FORMAT_DEPTH_JPEG:
+ case AIMAGE_FORMAT_RAW_DEPTH10:
return 1;
case AIMAGE_FORMAT_PRIVATE:
return 0;
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index 0c65e9e..07fc5de 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -419,6 +419,7 @@
EXPORT
media_status_t AMediaExtractor_getSampleFormat(AMediaExtractor *ex, AMediaFormat *fmt) {
+ ALOGV("AMediaExtractor_getSampleFormat");
if (fmt == NULL) {
return AMEDIA_ERROR_INVALID_PARAMETER;
}
@@ -428,6 +429,9 @@
if (err != OK) {
return translate_error(err);
}
+#ifdef LOG_NDEBUG
+ sampleMeta->dumpToLog();
+#endif
sp<AMessage> meta;
AMediaFormat_getFormat(fmt, &meta);
@@ -483,6 +487,19 @@
meta->setBuffer(AMEDIAFORMAT_KEY_AUDIO_PRESENTATION_INFO, audioPresentationsData);
}
+ int64_t val64;
+ if (sampleMeta->findInt64(kKeySampleFileOffset, &val64)) {
+ meta->setInt64("sample-file-offset", val64);
+ ALOGV("SampleFileOffset Found");
+ }
+ if (sampleMeta->findInt64(kKeyLastSampleIndexInChunk, &val64)) {
+ meta->setInt64("last-sample-index-in-chunk" /*AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK*/,
+ val64);
+ ALOGV("kKeyLastSampleIndexInChunk Found");
+ }
+
+ ALOGV("AMediaFormat_toString:%s", AMediaFormat_toString(fmt));
+
return AMEDIA_OK;
}
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index 1773023..c1793ce 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -334,6 +334,7 @@
EXPORT const char* AMEDIAFORMAT_KEY_IS_SYNC_FRAME = "is-sync-frame";
EXPORT const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL = "i-frame-interval";
EXPORT const char* AMEDIAFORMAT_KEY_LANGUAGE = "language";
+EXPORT const char* AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK = "last-sample-index-in-chunk";
EXPORT const char* AMEDIAFORMAT_KEY_LATENCY = "latency";
EXPORT const char* AMEDIAFORMAT_KEY_LEVEL = "level";
EXPORT const char* AMEDIAFORMAT_KEY_LOCATION = "location";
@@ -359,7 +360,9 @@
EXPORT const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP = "push-blank-buffers-on-shutdown";
EXPORT const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER = "repeat-previous-frame-after";
EXPORT const char* AMEDIAFORMAT_KEY_ROTATION = "rotation-degrees";
+EXPORT const char* AMEDIAFORMAT_KEY_SAMPLE_FILE_OFFSET = "sample-file-offset";
EXPORT const char* AMEDIAFORMAT_KEY_SAMPLE_RATE = "sample-rate";
+EXPORT const char* AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND = "sample-time-before-append";
EXPORT const char* AMEDIAFORMAT_KEY_SAR_HEIGHT = "sar-height";
EXPORT const char* AMEDIAFORMAT_KEY_SAR_WIDTH = "sar-width";
EXPORT const char* AMEDIAFORMAT_KEY_SEI = "sei";
diff --git a/media/ndk/NdkMediaMuxer.cpp b/media/ndk/NdkMediaMuxer.cpp
index d1992bf..1965e62 100644
--- a/media/ndk/NdkMediaMuxer.cpp
+++ b/media/ndk/NdkMediaMuxer.cpp
@@ -17,28 +17,24 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "NdkMediaMuxer"
-
-#include <media/NdkMediaMuxer.h>
+#include <android_util_Binder.h>
+#include <jni.h>
+#include <media/IMediaHTTPService.h>
#include <media/NdkMediaCodec.h>
#include <media/NdkMediaErrorPriv.h>
#include <media/NdkMediaFormatPriv.h>
-
-
-#include <utils/Log.h>
-#include <utils/StrongPointer.h>
+#include <media/NdkMediaMuxer.h>
+#include <media/stagefright/MediaAppender.h>
+#include <media/stagefright/MediaMuxer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaMuxer.h>
-#include <media/IMediaHTTPService.h>
-#include <android_util_Binder.h>
-
-#include <jni.h>
+#include <utils/Log.h>
+#include <utils/StrongPointer.h>
using namespace android;
struct AMediaMuxer {
- sp<MediaMuxer> mImpl;
-
+ sp<MediaMuxerBase> mImpl;
};
extern "C" {
@@ -46,8 +42,15 @@
EXPORT
AMediaMuxer* AMediaMuxer_new(int fd, OutputFormat format) {
ALOGV("ctor");
- AMediaMuxer *mData = new AMediaMuxer();
- mData->mImpl = new MediaMuxer(fd, (android::MediaMuxer::OutputFormat)format);
+ AMediaMuxer *mData = new (std::nothrow) AMediaMuxer();
+ if (mData == nullptr) {
+ return nullptr;
+ }
+ mData->mImpl = new (std::nothrow) MediaMuxer(fd, (android::MediaMuxer::OutputFormat)format);
+ if (mData->mImpl == nullptr) {
+ delete mData;
+ return nullptr;
+ }
return mData;
}
@@ -94,6 +97,34 @@
muxer->mImpl->writeSampleData(buf, trackIdx, info->presentationTimeUs, info->flags));
}
+EXPORT
+AMediaMuxer* AMediaMuxer_append(int fd, AppendMode mode) {
+ ALOGV("append");
+ AMediaMuxer* mData = new (std::nothrow) AMediaMuxer();
+ if (mData == nullptr) {
+ return nullptr;
+ }
+ mData->mImpl = MediaAppender::create(fd, (android::MediaAppender::AppendMode)mode);
+ if (mData->mImpl == nullptr) {
+ delete mData;
+ return nullptr;
+ }
+ return mData;
+}
+
+EXPORT
+ssize_t AMediaMuxer_getTrackCount(AMediaMuxer* muxer) {
+ return muxer->mImpl->getTrackCount();
+}
+
+EXPORT
+AMediaFormat* AMediaMuxer_getTrackFormat(AMediaMuxer* muxer, size_t idx) {
+ sp<AMessage> format = muxer->mImpl->getTrackFormat(idx);
+ if (format != nullptr) {
+ return AMediaFormat_fromMsg(&format);
+ }
+ return nullptr;
+}
} // extern "C"
diff --git a/media/ndk/include/media/NdkImage.h b/media/ndk/include/media/NdkImage.h
index e19dd3a..71bc6d9 100644
--- a/media/ndk/include/media/NdkImage.h
+++ b/media/ndk/include/media/NdkImage.h
@@ -50,7 +50,10 @@
*/
typedef struct AImage AImage;
-// Formats not listed here will not be supported by AImageReader
+/**
+ * AImage supported formats: AImageReader only guarantees support for the formats
+ * listed here.
+ */
enum AIMAGE_FORMATS {
/**
* 32 bits RGBA format, 8 bits for each of the four channels.
@@ -813,7 +816,7 @@
* Available since API level 26.
*
* @param image the {@link AImage} of interest.
- * @param outBuffer The memory area pointed to by buffer will contain the acquired AHardwareBuffer
+ * @param buffer The memory area pointed to by buffer will contain the acquired AHardwareBuffer
* handle.
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
diff --git a/media/ndk/include/media/NdkImageReader.h b/media/ndk/include/media/NdkImageReader.h
index d86f3c7..4bd7f2a 100644
--- a/media/ndk/include/media/NdkImageReader.h
+++ b/media/ndk/include/media/NdkImageReader.h
@@ -328,10 +328,10 @@
* still acquire images from this {@link AImageReader} and access {@link AHardwareBuffer} via
* {@link AImage_getHardwareBuffer()}. The {@link AHardwareBuffer} gained this way can then
* be passed back to hardware (such as GPU or hardware encoder if supported) for future processing.
- * For example, you can obtain an {@link EGLClientBuffer} from the {@link AHardwareBuffer} by using
- * {@link eglGetNativeClientBufferANDROID} extension and pass that {@link EGLClientBuffer} to {@link
- * eglCreateImageKHR} to create an {@link EGLImage} resource type, which may then be bound to a
- * texture via {@link glEGLImageTargetTexture2DOES} on supported devices. This can be useful for
+ * For example, you can obtain an EGLClientBuffer from the {@link AHardwareBuffer} by using
+ * eglGetNativeClientBufferANDROID extension and pass that EGLClientBuffer to
+ * eglCreateImageKHR to create an EGLImage resource type, which may then be bound to a
+ * texture via glEGLImageTargetTexture2DOES on supported devices. This can be useful for
* transporting textures that may be shared cross-process.</p>
* <p>In general, when software access to image data is not necessary, an {@link AImageReader}
* created with {@link AIMAGE_FORMAT_PRIVATE} format is more efficient, compared with {@link
@@ -339,7 +339,7 @@
*
* <p>Note that not all format and usage flag combination is supported by the {@link AImageReader},
* especially if \c format is {@link AIMAGE_FORMAT_PRIVATE}, \c usage must not include either
- * {@link AHARDWAREBUFFER_USAGE_READ_RARELY} or {@link AHARDWAREBUFFER_USAGE_READ_OFTEN}</p>
+ * {@link AHARDWAREBUFFER_USAGE_CPU_READ_RARELY} or {@link AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN}</p>
*
* @param width The default width in pixels of the Images that this reader will produce.
* @param height The default height in pixels of the Images that this reader will produce.
@@ -358,7 +358,7 @@
* <th>Compatible usage flags</th>
* </tr>
* <tr>
- * <td>non-{@link AIMAGE_FORMAT_PRIVATE PRIVATE} formats defined in {@link AImage.h}
+ * <td>non-{@link AIMAGE_FORMAT_PRIVATE} formats defined in {@link NdkImage.h}
* </td>
* <td>{@link AHARDWAREBUFFER_USAGE_CPU_READ_RARELY} or
* {@link AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN}</td>
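The documentation above describes, in prose, the path from an AHardwareBuffer obtained via AImage_getHardwareBuffer() to a GLES texture (eglGetNativeClientBufferANDROID -> eglCreateImageKHR -> glEGLImageTargetTexture2DOES). The sketch below is not part of this patch; it assumes the EGL_ANDROID_get_native_client_buffer, EGL_ANDROID_image_native_buffer and GL_OES_EGL_image_external extensions are available and omits all error handling.

```
// Sketch only: import an AHardwareBuffer (e.g. from AImage_getHardwareBuffer())
// into a GLES texture.
#define EGL_EGLEXT_PROTOTYPES
#define GL_GLEXT_PROTOTYPES
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <android/hardware_buffer.h>

static GLuint textureFromHardwareBuffer(EGLDisplay display, AHardwareBuffer* hwBuffer) {
    // Wrap the AHardwareBuffer as an EGLClientBuffer.
    EGLClientBuffer clientBuffer = eglGetNativeClientBufferANDROID(hwBuffer);

    // Create an EGLImage from the client buffer.
    const EGLint attrs[] = {EGL_IMAGE_PRESERVED_KHR, EGL_TRUE, EGL_NONE};
    EGLImageKHR image = eglCreateImageKHR(display, EGL_NO_CONTEXT,
                                          EGL_NATIVE_BUFFER_ANDROID, clientBuffer, attrs);

    // Bind the EGLImage to an external texture.
    GLuint tex = 0;
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_EXTERNAL_OES, tex);
    glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, image);
    return tex;
}
```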
@@ -441,6 +441,10 @@
AImageReader* reader,
AHardwareBuffer* buffer);
+/**
+ * A listener for AHardwareBuffer removal events; use
+ * {@link AImageReader_setBufferRemovedListener} to register the listener object with an AImageReader.
+ */
typedef struct AImageReader_BufferRemovedListener {
/// Optional application context passed as the first parameter of the callback.
void* context;
diff --git a/media/ndk/include/media/NdkMediaError.h b/media/ndk/include/media/NdkMediaError.h
index 2be1d6e..02fdc79 100644
--- a/media/ndk/include/media/NdkMediaError.h
+++ b/media/ndk/include/media/NdkMediaError.h
@@ -40,7 +40,11 @@
__BEGIN_DECLS
+/**
+ * Media status and error codes returned by NDK media functions.
+ */
typedef enum {
+ /** The requested media operation completed successfully. */
AMEDIA_OK = 0,
/**
@@ -55,14 +59,34 @@
AMEDIACODEC_ERROR_RECLAIMED = 1101,
AMEDIA_ERROR_BASE = -10000,
+
+ /** The called media function failed with an unknown error. */
AMEDIA_ERROR_UNKNOWN = AMEDIA_ERROR_BASE,
+
+ /** The input media data is corrupt or incomplete. */
AMEDIA_ERROR_MALFORMED = AMEDIA_ERROR_BASE - 1,
+
+ /** The required operation or media formats are not supported. */
AMEDIA_ERROR_UNSUPPORTED = AMEDIA_ERROR_BASE - 2,
+
+ /** An invalid (or already closed) object is used in the function call. */
AMEDIA_ERROR_INVALID_OBJECT = AMEDIA_ERROR_BASE - 3,
+
+ /** At least one invalid parameter was passed. */
AMEDIA_ERROR_INVALID_PARAMETER = AMEDIA_ERROR_BASE - 4,
+
+ /** The media object is not in the right state for the required operation. */
AMEDIA_ERROR_INVALID_OPERATION = AMEDIA_ERROR_BASE - 5,
+
+ /** The media stream ended while processing the requested operation. */
AMEDIA_ERROR_END_OF_STREAM = AMEDIA_ERROR_BASE - 6,
+
+ /** An error occurred while the media object was carrying out an I/O operation. */
AMEDIA_ERROR_IO = AMEDIA_ERROR_BASE - 7,
+
+ /** The required operation would have to block (on I/O or otherwise),
+ * but blocking is not enabled.
+ */
AMEDIA_ERROR_WOULD_BLOCK = AMEDIA_ERROR_BASE - 8,
AMEDIA_DRM_ERROR_BASE = -20000,
@@ -77,10 +101,20 @@
AMEDIA_DRM_LICENSE_EXPIRED = AMEDIA_DRM_ERROR_BASE - 9,
AMEDIA_IMGREADER_ERROR_BASE = -30000,
+
+ /** There are no more image buffers to read/write image data. */
AMEDIA_IMGREADER_NO_BUFFER_AVAILABLE = AMEDIA_IMGREADER_ERROR_BASE - 1,
+
+ /** The AImage object has used up the maximum number of allowed image buffers. */
AMEDIA_IMGREADER_MAX_IMAGES_ACQUIRED = AMEDIA_IMGREADER_ERROR_BASE - 2,
+
+ /** The required image buffer could not be locked to read. */
AMEDIA_IMGREADER_CANNOT_LOCK_IMAGE = AMEDIA_IMGREADER_ERROR_BASE - 3,
+
+ /** The media data or buffer could not be unlocked. */
AMEDIA_IMGREADER_CANNOT_UNLOCK_IMAGE = AMEDIA_IMGREADER_ERROR_BASE - 4,
+
+ /** The media/buffer needs to be locked to perform the required operation. */
AMEDIA_IMGREADER_IMAGE_NOT_LOCKED = AMEDIA_IMGREADER_ERROR_BASE - 5,
} media_status_t;
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 476bbd9..fbd855d 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -307,6 +307,9 @@
extern const char* AMEDIAFORMAT_KEY_THUMBNAIL_CSD_AV1C __INTRODUCED_IN(31);
extern const char* AMEDIAFORMAT_KEY_XMP_OFFSET __INTRODUCED_IN(31);
extern const char* AMEDIAFORMAT_KEY_XMP_SIZE __INTRODUCED_IN(31);
+extern const char* AMEDIAFORMAT_KEY_SAMPLE_FILE_OFFSET __INTRODUCED_IN(31);
+extern const char* AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK __INTRODUCED_IN(31);
+extern const char* AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND __INTRODUCED_IN(31);
extern const char* AMEDIAFORMAT_VIDEO_QP_B_MAX __INTRODUCED_IN(31);
extern const char* AMEDIAFORMAT_VIDEO_QP_B_MIN __INTRODUCED_IN(31);
diff --git a/media/ndk/include/media/NdkMediaMuxer.h b/media/ndk/include/media/NdkMediaMuxer.h
index 519e249..d7eccb8 100644
--- a/media/ndk/include/media/NdkMediaMuxer.h
+++ b/media/ndk/include/media/NdkMediaMuxer.h
@@ -54,6 +54,17 @@
AMEDIAMUXER_OUTPUT_FORMAT_THREE_GPP = 2,
} OutputFormat;
+typedef enum {
+ /* The last group of pictures (GOP) of the video track may be incomplete, so it is safer
+ * to scrap it and rewrite it. If both audio and video tracks are present in a file,
+ * samples of the audio track after the last video GOP are scrapped too.
+ * If only an audio track is present, no samples are discarded.
+ */
+ AMEDIAMUXER_APPEND_IGNORE_LAST_VIDEO_GOP = 0,
+ // Keep all existing samples as they are and only append new samples after them.
+ AMEDIAMUXER_APPEND_TO_EXISTING_DATA = 1,
+} AppendMode;
+
/**
* Create new media muxer.
*
@@ -138,6 +149,46 @@
size_t trackIdx, const uint8_t *data,
const AMediaCodecBufferInfo *info) __INTRODUCED_IN(21);
+/**
+ * Creates a new media muxer for appending data to an existing MPEG4 file.
+ * This is a synchronous API call and could take a while to return if the existing file is large.
+ * Only works for MPEG4 files matching one of the following characteristics:
+ * <ul>
+ * <li>a single audio track.</li>
+ * <li>a single video track.</li>
+ * <li>a single audio and a single video track.</li>
+ * </ul>
+ * @param fd Must be opened with read and write permission. Does not take ownership of
+ * this fd, i.e., the caller is responsible for closing it.
+ * @param mode Specifies how data will be appended; the AppendMode enum describes
+ * the possible methods for appending.
+ * @return Pointer to an AMediaMuxer if the file (fd) already has tracks, otherwise nullptr.
+ * {@link AMediaMuxer_delete} should be used to free the returned pointer.
+ *
+ * Available since API level 31.
+ */
+AMediaMuxer* AMediaMuxer_append(int fd, AppendMode mode) __INTRODUCED_IN(31);
+
+/**
+ * Returns the number of tracks added in the file passed to {@link AMediaMuxer_new} or
+ * the number of existing tracks in the file passed to {@link AMediaMuxer_append}.
+ * Should be called in INITIALIZED or STARTED state, otherwise returns -1.
+ *
+ * Available since API level 31.
+ */
+ssize_t AMediaMuxer_getTrackCount(AMediaMuxer*) __INTRODUCED_IN(31);
+
+/**
+ * Returns AMediaFormat of the added track with index idx in the file passed to
+ * {@link AMediaMuxer_new} or the AMediaFormat of the existing track with index idx
+ * in the file passed to {@link AMediaMuxer_append}.
+ * Should be called in INITIALIZED or STARTED state, otherwise returns nullptr.
+ * {@link AMediaFormat_delete} should be used to free the returned pointer.
+ *
+ * Available since API level 31.
+ */
+AMediaFormat* AMediaMuxer_getTrackFormat(AMediaMuxer* muxer, size_t idx) __INTRODUCED_IN(31);
+
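Taken together, the three new entry points support an append workflow along the following lines. This is a hypothetical sketch, not part of the patch: it assumes the caller already has a newly encoded sample (e.g. from an AMediaCodec output buffer) and elides how its timestamp is continued from the existing file (see AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND in the returned track format).

```
#include <fcntl.h>
#include <unistd.h>

#include <media/NdkMediaCodec.h>
#include <media/NdkMediaFormat.h>
#include <media/NdkMediaMuxer.h>

// Hypothetical: reopen an MPEG4 file and append one already-encoded sample to track 0.
static media_status_t appendSample(const char* path, const uint8_t* data,
                                   const AMediaCodecBufferInfo* info) {
    int fd = open(path, O_RDWR);  // AMediaMuxer_append does not take ownership of the fd
    if (fd < 0) {
        return AMEDIA_ERROR_IO;
    }
    AMediaMuxer* muxer = AMediaMuxer_append(fd, AMEDIAMUXER_APPEND_IGNORE_LAST_VIDEO_GOP);
    if (muxer == nullptr) {  // no existing tracks were found
        close(fd);
        return AMEDIA_ERROR_UNSUPPORTED;
    }

    // Inspect the existing tracks, e.g. to configure a compatible encoder.
    ssize_t trackCount = AMediaMuxer_getTrackCount(muxer);
    for (ssize_t i = 0; i < trackCount; ++i) {
        AMediaFormat* fmt = AMediaMuxer_getTrackFormat(muxer, i);
        // ... read mime/width/height etc. from fmt ...
        AMediaFormat_delete(fmt);
    }

    media_status_t err = AMediaMuxer_start(muxer);
    if (err == AMEDIA_OK) {
        err = AMediaMuxer_writeSampleData(muxer, 0 /* trackIdx */, data, info);
    }
    if (err == AMEDIA_OK) {
        err = AMediaMuxer_stop(muxer);
    }
    AMediaMuxer_delete(muxer);
    close(fd);
    return err;
}
```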
__END_DECLS
#endif // _NDK_MEDIA_MUXER_H
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index eead681..7e9e57e 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -109,6 +109,7 @@
AMEDIAFORMAT_KEY_IS_SYNC_FRAME; # var introduced=29
AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var introduced=21
AMEDIAFORMAT_KEY_LANGUAGE; # var introduced=21
+ AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK; # var introduced=31
AMEDIAFORMAT_KEY_LATENCY; # var introduced=28
AMEDIAFORMAT_KEY_LEVEL; # var introduced=28
AMEDIAFORMAT_KEY_LOCATION; # var introduced=29
@@ -134,6 +135,8 @@
AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var introduced=21
AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var introduced=21
AMEDIAFORMAT_KEY_ROTATION; # var introduced=28
+ AMEDIAFORMAT_KEY_SAMPLE_FILE_OFFSET; # var introduced=31
+ AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND; # var introduced=31
AMEDIAFORMAT_KEY_SAMPLE_RATE; # var introduced=21
AMEDIAFORMAT_KEY_SAR_HEIGHT; # var introduced=29
AMEDIAFORMAT_KEY_SAR_WIDTH; # var introduced=29
@@ -286,7 +289,10 @@
AMediaFormat_setString;
AMediaFormat_toString;
AMediaMuxer_addTrack;
+ AMediaMuxer_append; # introduced=31
AMediaMuxer_delete;
+ AMediaMuxer_getTrackCount; # introduced=31
+ AMediaMuxer_getTrackFormat; # introduced=31
AMediaMuxer_new;
AMediaMuxer_setLocation;
AMediaMuxer_setOrientationHint;
diff --git a/media/tests/SampleVideoEncoder/README.md b/media/tests/SampleVideoEncoder/README.md
index 074c939..2e275c5 100644
--- a/media/tests/SampleVideoEncoder/README.md
+++ b/media/tests/SampleVideoEncoder/README.md
@@ -2,7 +2,7 @@
This is a sample android application for encoding AVC/HEVC streams with B-Frames enabled. It uses MediaRecorder APIs to record B-frames enabled video from camera2 input and MediaCodec APIs to encode reference test vector using input surface.
-This page describes how to get started with the Encoder App.
+This page describes how to get started with the Encoder App and how to run the tests for it.
# Getting Started
@@ -33,6 +33,17 @@
After installing the app, a TextureView showing the camera preview is displayed on one third of the screen. It also features checkboxes to select between AVC/HEVC and between HW/SW codecs, an option to choose either the MediaRecorder APIs or the MediaCodec APIs, and a 'Start' button to start/stop recording.
+# Running Tests
+
+The app also contains a test that exercises the MediaCodec APIs for encoding AVC/HEVC streams with B-frames enabled. The test does not require the application UI.
+
+## Running the tests using atest
+Note that the atest command will install the SampleVideoEncoder app on the device.
+
+Command to run the tests:
+```
+atest SampleVideoEncoder
+```
# Output
@@ -40,3 +51,6 @@
```
/storage/emulated/0/Android/data/com.android.media.samplevideoencoder/files/
```
+
+After encoding with the MediaCodec APIs completes, the total numbers of I-frames, P-frames, and B-frames are displayed on the screen.
+The results of the tests can be obtained from the logcat output of the test.
diff --git a/media/tests/SampleVideoEncoder/app/Android.bp b/media/tests/SampleVideoEncoder/app/Android.bp
index 3a66955..58b219b 100644
--- a/media/tests/SampleVideoEncoder/app/Android.bp
+++ b/media/tests/SampleVideoEncoder/app/Android.bp
@@ -23,7 +23,7 @@
default_applicable_licenses: ["frameworks_av_license"],
}
-android_app {
+android_test {
name: "SampleVideoEncoder",
manifest: "src/main/AndroidManifest.xml",
@@ -41,6 +41,10 @@
"androidx.annotation_annotation",
"androidx.appcompat_appcompat",
"androidx-constraintlayout_constraintlayout",
+ "junit",
+ "androidx.test.core",
+ "androidx.test.runner",
+ "hamcrest-library",
],
javacflags: [
diff --git a/media/tests/SampleVideoEncoder/app/AndroidTest.xml b/media/tests/SampleVideoEncoder/app/AndroidTest.xml
new file mode 100644
index 0000000..91f4304
--- /dev/null
+++ b/media/tests/SampleVideoEncoder/app/AndroidTest.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs SampleVideoEncoder Tests">
+ <target_preparer class="com.android.tradefed.targetprep.TestAppInstallSetup">
+ <option name="cleanup-apks" value="false" />
+ <option name="test-file-name" value="SampleVideoEncoder.apk" />
+ </target_preparer>
+
+ <option name="test-tag" value="SampleVideoEncoder" />
+ <test class="com.android.tradefed.testtype.AndroidJUnitTest" >
+ <option name="package" value="com.android.media.samplevideoencoder" />
+ <option name="runner" value="androidx.test.runner.AndroidJUnitRunner" />
+ <option name="hidden-api-checks" value="false"/>
+ </test>
+</configuration>
diff --git a/media/tests/SampleVideoEncoder/app/src/androidTest/java/com/android/media/samplevideoencoder/tests/SampleVideoEncoderTest.java b/media/tests/SampleVideoEncoder/app/src/androidTest/java/com/android/media/samplevideoencoder/tests/SampleVideoEncoderTest.java
new file mode 100644
index 0000000..1ef332e
--- /dev/null
+++ b/media/tests/SampleVideoEncoder/app/src/androidTest/java/com/android/media/samplevideoencoder/tests/SampleVideoEncoderTest.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.media.samplevideoencoder.tests;
+
+import androidx.test.platform.app.InstrumentationRegistry;
+
+import android.content.Context;
+import android.media.MediaFormat;
+import android.util.Log;
+
+import com.android.media.samplevideoencoder.MediaCodecSurfaceEncoder;
+import com.android.media.samplevideoencoder.R;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertThat;
+
+@RunWith(Parameterized.class)
+public class SampleVideoEncoderTest {
+ private static final String TAG = SampleVideoEncoderTest.class.getSimpleName();
+ private final Context mContext;
+ private int mMaxBFrames;
+ private int mInputResId;
+ private String mMime;
+ private boolean mIsSoftwareEncoder;
+
+ @Parameterized.Parameters
+ public static Collection<Object[]> inputFiles() {
+ return Arrays.asList(new Object[][]{
+ // Parameters: MimeType, isSoftwareEncoder, maxBFrames
+ {MediaFormat.MIMETYPE_VIDEO_AVC, false, 1},
+ {MediaFormat.MIMETYPE_VIDEO_AVC, true, 1},
+ {MediaFormat.MIMETYPE_VIDEO_HEVC, false, 1},
+ {MediaFormat.MIMETYPE_VIDEO_HEVC, true, 1}});
+ }
+
+ public SampleVideoEncoderTest(String mimeType, boolean isSoftwareEncoder, int maxBFrames) {
+ this.mContext = InstrumentationRegistry.getInstrumentation().getTargetContext();
+ this.mInputResId = R.raw.crowd_1920x1080_25fps_4000kbps_h265;
+ this.mMime = mimeType;
+ this.mIsSoftwareEncoder = isSoftwareEncoder;
+ this.mMaxBFrames = maxBFrames;
+ }
+
+ private String getOutputPath() {
+ File dir = mContext.getExternalFilesDir(null);
+ if (dir == null) {
+ Log.e(TAG, "Cannot get external directory path to save output video");
+ return null;
+ }
+ String videoPath = dir.getAbsolutePath() + "/Video-" + System.currentTimeMillis() + ".mp4";
+ Log.i(TAG, "Output video is saved at: " + videoPath);
+ return videoPath;
+ }
+
+ @Test
+ public void testMediaSurfaceEncoder() throws IOException, InterruptedException {
+ String outputFilePath = getOutputPath();
+ MediaCodecSurfaceEncoder surfaceEncoder =
+ new MediaCodecSurfaceEncoder(mContext, mInputResId, mMime, mIsSoftwareEncoder,
+ outputFilePath, mMaxBFrames);
+ int encodingStatus = surfaceEncoder.startEncodingSurface();
+ assertThat(encodingStatus, is(equalTo(0)));
+ int[] frameNumArray = surfaceEncoder.getFrameTypes();
+ Log.i(TAG, "Results: I-Frames: " + frameNumArray[0] + "; P-Frames: " + frameNumArray[1] +
+ "\n " + "; B-Frames:" + frameNumArray[2]);
+ assertNotEquals("Encoder mime: " + mMime + " isSoftware: " + mIsSoftwareEncoder +
+ " failed to generate B Frames", frameNumArray[2], 0);
+ }
+}
diff --git a/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml b/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml
index ed668bb..b17541d 100644
--- a/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml
+++ b/media/tests/SampleVideoEncoder/app/src/main/AndroidManifest.xml
@@ -38,4 +38,8 @@
</activity>
</application>
+ <instrumentation android:name="androidx.test.runner.AndroidJUnitRunner"
+ android:targetPackage="com.android.media.samplevideoencoder"
+ android:label="SampleVideoEncoder Test"/>
+
</manifest>
\ No newline at end of file
diff --git a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MainActivity.java b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MainActivity.java
index 33e81bb..a7a353c 100644
--- a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MainActivity.java
+++ b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MainActivity.java
@@ -56,6 +56,7 @@
import android.util.Log;
import android.util.Size;
import android.widget.RadioGroup;
+import android.widget.TextView;
import android.widget.Toast;
import java.lang.ref.WeakReference;
@@ -80,6 +81,14 @@
private static final int VIDEO_BITRATE = 8000000 /* 8 Mbps */;
private static final int VIDEO_FRAMERATE = 30;
+ /**
+ * The constant values assigned to the frame types here are internal to this app.
+ * They do not correspond to the values defined in the AVC/HEVC specifications.
+ */
+ public static final int FRAME_TYPE_I = 0;
+ public static final int FRAME_TYPE_P = 1;
+ public static final int FRAME_TYPE_B = 2;
+
private String mMime = MediaFormat.MIMETYPE_VIDEO_AVC;
private String mOutputVideoPath = null;
@@ -89,6 +98,7 @@
private boolean mIsRecording;
private AutoFitTextureView mTextureView;
+ private TextView mTextView;
private CameraDevice mCameraDevice;
private CameraCaptureSession mPreviewSession;
private CaptureRequest.Builder mPreviewBuilder;
@@ -101,6 +111,8 @@
private Button mStartButton;
+ private int[] mFrameTypeOccurrences;
+
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
@@ -129,6 +141,8 @@
final CheckBox checkBox_mr = findViewById(R.id.checkBox_media_recorder);
final CheckBox checkBox_mc = findViewById(R.id.checkBox_media_codec);
mTextureView = findViewById(R.id.texture);
+ mTextView = findViewById(R.id.textViewResults);
+
checkBox_mr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
@@ -162,6 +176,7 @@
@Override
public void onClick(View v) {
if (v.getId() == R.id.start_button) {
+ mTextView.setText(null);
if (mIsMediaRecorder) {
if (mIsRecording) {
stopRecordingVideo();
@@ -198,6 +213,7 @@
mainActivity.mOutputVideoPath);
try {
encodingStatus = codecSurfaceEncoder.startEncodingSurface();
+ mainActivity.mFrameTypeOccurrences = codecSurfaceEncoder.getFrameTypes();
} catch (IOException | InterruptedException e) {
e.printStackTrace();
}
@@ -211,6 +227,13 @@
if (encodingStatus == 0) {
Toast.makeText(mainActivity.getApplicationContext(), "Encoding Completed",
Toast.LENGTH_SHORT).show();
+ mainActivity.mTextView.append("\n Encoded stream contains: ");
+ mainActivity.mTextView.append("\n Number of I-Frames: " +
+ mainActivity.mFrameTypeOccurrences[FRAME_TYPE_I]);
+ mainActivity.mTextView.append("\n Number of P-Frames: " +
+ mainActivity.mFrameTypeOccurrences[FRAME_TYPE_P]);
+ mainActivity.mTextView.append("\n Number of B-Frames: " +
+ mainActivity.mFrameTypeOccurrences[FRAME_TYPE_B]);
} else {
Toast.makeText(mainActivity.getApplicationContext(),
"Error occurred while " + "encoding", Toast.LENGTH_SHORT).show();
diff --git a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MediaCodecSurfaceEncoder.java b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MediaCodecSurfaceEncoder.java
index 146a475..011c38c 100644
--- a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MediaCodecSurfaceEncoder.java
+++ b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/MediaCodecSurfaceEncoder.java
@@ -31,10 +31,14 @@
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
+import java.util.Arrays;
+
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_B;
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_I;
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_P;
public class MediaCodecSurfaceEncoder {
private static final String TAG = MediaCodecSurfaceEncoder.class.getSimpleName();
-
private static final boolean DEBUG = false;
private static final int VIDEO_BITRATE = 8000000 /*8 Mbps*/;
private static final int VIDEO_FRAMERATE = 30;
@@ -44,6 +48,8 @@
private final String mMime;
private final String mOutputPath;
private int mTrackID = -1;
+ private int mFrameNum = 0;
+ private int[] mFrameTypeOccurrences = {0, 0, 0};
private Surface mSurface;
private MediaExtractor mExtractor;
@@ -128,8 +134,10 @@
mEncoder.reset();
mSurface.release();
mSurface = null;
+ Log.i(TAG, "Number of I-frames = " + mFrameTypeOccurrences[FRAME_TYPE_I]);
+ Log.i(TAG, "Number of P-frames = " + mFrameTypeOccurrences[FRAME_TYPE_P]);
+ Log.i(TAG, "Number of B-frames = " + mFrameTypeOccurrences[FRAME_TYPE_B]);
}
-
mEncoder.release();
mDecoder.release();
mExtractor.release();
@@ -193,6 +201,8 @@
mSawEncOutputEOS = false;
mDecOutputCount = 0;
mEncOutputCount = 0;
+ mFrameNum = 0;
+ Arrays.fill(mFrameTypeOccurrences, 0);
}
private void configureCodec(MediaFormat decFormat, MediaFormat encFormat) {
@@ -336,6 +346,21 @@
}
if (info.size > 0) {
ByteBuffer buf = mEncoder.getOutputBuffer(bufferIndex);
+ // Parse the buffer to get the frame type
+ if (DEBUG) Log.d(TAG, "[ Frame : " + (mFrameNum++) + " ]");
+ int frameTypeResult = -1;
+ if (mMime == MediaFormat.MIMETYPE_VIDEO_AVC) {
+ frameTypeResult = NalUnitUtil.getStandardizedFrameTypesFromAVC(buf);
+ } else if (mMime == MediaFormat.MIMETYPE_VIDEO_HEVC){
+ frameTypeResult = NalUnitUtil.getStandardizedFrameTypesFromHEVC(buf);
+ } else {
+ Log.e(TAG, "Mime type " + mMime + " is not supported.");
+ return;
+ }
+ if (frameTypeResult != -1) {
+ mFrameTypeOccurrences[frameTypeResult]++;
+ }
+
if (mMuxer != null) {
if (mTrackID == -1) {
mTrackID = mMuxer.addTrack(mEncoder.getOutputFormat());
@@ -353,4 +378,8 @@
private boolean hasSeenError() {
return mAsyncHandleDecoder.hasSeenError() || mAsyncHandleEncoder.hasSeenError();
}
+
+ public int[] getFrameTypes() {
+ return mFrameTypeOccurrences;
+ }
}
diff --git a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/NalUnitUtil.java b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/NalUnitUtil.java
new file mode 100644
index 0000000..efff4fd
--- /dev/null
+++ b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/NalUnitUtil.java
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.samplevideoencoder;
+
+import android.util.Log;
+
+import java.nio.ByteBuffer;
+
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_B;
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_I;
+import static com.android.media.samplevideoencoder.MainActivity.FRAME_TYPE_P;
+
+public class NalUnitUtil {
+ private static final String TAG = NalUnitUtil.class.getSimpleName();
+ private static final boolean DEBUG = false;
+
+ public static int findNalUnit(byte[] dataArray, int pos, int limit) {
+ int startOffset = 0;
+ if (limit - pos < 4) {
+ return startOffset;
+ }
+ if (dataArray[pos] == 0 && dataArray[pos + 1] == 0 && dataArray[pos + 2] == 1) {
+ startOffset = 3;
+ } else {
+ if (dataArray[pos] == 0 && dataArray[pos + 1] == 0 && dataArray[pos + 2] == 0 &&
+ dataArray[pos + 3] == 1) {
+ startOffset = 4;
+ }
+ }
+ return startOffset;
+ }
+
+ private static int getAVCNalUnitType(byte[] dataArray, int nalUnitOffset) {
+ return dataArray[nalUnitOffset] & 0x1F;
+ }
+
+ private static int parseAVCNALUnitData(byte[] dataArray, int offset, int limit) {
+ ParsableBitArray bitArray = new ParsableBitArray(dataArray);
+ bitArray.reset(dataArray, offset, limit);
+
+ bitArray.skipBit(); // forbidden_zero_bit
+ bitArray.readBits(2); // nal_ref_idc
+ bitArray.skipBits(5); // nal_unit_type
+
+ bitArray.readUEV(); // first_mb_in_slice
+ if (!bitArray.canReadUEV()) {
+ return -1;
+ }
+ int sliceType = bitArray.readUEV();
+ if (DEBUG) Log.d(TAG, "slice_type = " + sliceType);
+ if (sliceType == 0) {
+ return FRAME_TYPE_P;
+ } else if (sliceType == 1) {
+ return FRAME_TYPE_B;
+ } else if (sliceType == 2) {
+ return FRAME_TYPE_I;
+ } else {
+ return -1;
+ }
+ }
+
+ private static int getHEVCNalUnitType(byte[] dataArray, int nalUnitOffset) {
+ return (dataArray[nalUnitOffset] & 0x7E) >> 1;
+ }
+
+ private static int parseHEVCNALUnitData(byte[] dataArray, int offset, int limit,
+ int nalUnitType) {
+ // nal_unit_type values from H.265/HEVC Table 7-1.
+ final int BLA_W_LP = 16;
+ final int RSV_IRAP_VCL23 = 23;
+
+ ParsableBitArray bitArray = new ParsableBitArray(dataArray);
+ bitArray.reset(dataArray, offset, limit);
+
+ bitArray.skipBit(); // forbidden zero bit
+ bitArray.readBits(6); // nal_unit_type
+ bitArray.readBits(6); // nuh_layer_id
+ bitArray.readBits(3); // nuh_temporal_id_plus1
+
+ // Parsing slice_segment_header values from H.265/HEVC Section 7.3.6.1
+ boolean first_slice_segment = bitArray.readBit(); // first_slice_segment_in_pic_flag
+ if (!first_slice_segment) return -1;
+ if (nalUnitType >= BLA_W_LP && nalUnitType <= RSV_IRAP_VCL23) {
+ bitArray.readBit(); // no_output_of_prior_pics_flag
+ }
+ bitArray.readUEV(); // slice_pic_parameter_set_id
+ // Assume num_extra_slice_header_bits element of PPS data to be 0
+ int sliceType = bitArray.readUEV();
+ if (DEBUG) Log.d(TAG, "slice_type = " + sliceType);
+ if (sliceType == 0) {
+ return FRAME_TYPE_B;
+ } else if (sliceType == 1) {
+ return FRAME_TYPE_P;
+ } else if (sliceType == 2) {
+ return FRAME_TYPE_I;
+ } else {
+ return -1;
+ }
+ }
+
+ public static int getStandardizedFrameTypesFromAVC(ByteBuffer buf) {
+ int limit = buf.limit();
+ byte[] dataArray = new byte[buf.remaining()];
+ buf.get(dataArray);
+ int frameType = -1;
+ for (int pos = 0; pos + 3 < limit; ) {
+ int startOffset = NalUnitUtil.findNalUnit(dataArray, pos, limit);
+ if (startOffset != 0) {
+ int nalUnitType = getAVCNalUnitType(dataArray, (pos + startOffset));
+ if (DEBUG) {
+ Log.d(TAG, "NalUnitOffset = " + (pos + startOffset));
+ Log.d(TAG, "NalUnitType = " + nalUnitType);
+ }
+ // SLICE_NAL = 1; IDR_SLICE_NAL = 5
+ if (nalUnitType == 1 || nalUnitType == 5) {
+ frameType = parseAVCNALUnitData(dataArray, (pos + startOffset),
+ (limit - pos - startOffset));
+ break;
+ }
+ pos += 3;
+ } else {
+ pos++;
+ }
+ }
+ return frameType;
+ }
+
+ public static int getStandardizedFrameTypesFromHEVC(ByteBuffer buf) {
+ int limit = buf.limit();
+ byte[] dataArray = new byte[buf.remaining()];
+ buf.get(dataArray);
+ int frameType = -1;
+ for (int pos = 0; pos + 3 < limit; ) {
+ int startOffset = NalUnitUtil.findNalUnit(dataArray, pos, limit);
+ if (startOffset != 0) {
+ int nalUnitType = NalUnitUtil.getHEVCNalUnitType(dataArray, (pos + startOffset));
+ if (DEBUG) {
+ Log.d(TAG, "NalUnitOffset = " + (pos + startOffset));
+ Log.d(TAG, "NalUnitType = " + nalUnitType);
+ }
+ // Parse NAL units containing slice headers; their nal_unit_type values lie in the range 0 to 21
+ if (nalUnitType >= 0 && nalUnitType <= 21) {
+ frameType = parseHEVCNALUnitData(dataArray, (pos + startOffset),
+ (limit - pos - startOffset), nalUnitType);
+ break;
+ }
+ pos += 3;
+ } else {
+ pos++;
+ }
+ }
+ return frameType;
+ }
+}
diff --git a/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/ParsableBitArray.java b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/ParsableBitArray.java
new file mode 100644
index 0000000..e4bfaa3
--- /dev/null
+++ b/media/tests/SampleVideoEncoder/app/src/main/java/com/android/media/samplevideoencoder/ParsableBitArray.java
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.samplevideoencoder;
+
+public class ParsableBitArray {
+ public byte[] data;
+ private int byteOffset;
+ private int bitOffset;
+ private int byteLimit;
+
+ public ParsableBitArray(byte[] dataArray) {
+ this(dataArray, dataArray.length);
+ }
+
+ public ParsableBitArray(byte[] dataArray, int limit) {
+ this.data = dataArray;
+ byteLimit = limit;
+ }
+
+ public void reset(byte[] data, int offset, int limit) {
+ this.data = data;
+ byteOffset = offset;
+ bitOffset = 0;
+ byteLimit = limit;
+ }
+
+ public void skipBit() {
+ if (++bitOffset == 8) {
+ bitOffset = 0;
+ byteOffset++;
+ }
+ }
+
+ public void skipBits(int numBits) {
+ int numBytes = numBits / 8;
+ byteOffset += numBytes;
+ bitOffset += numBits - (numBytes * 8);
+ if (bitOffset > 7) {
+ byteOffset++;
+ bitOffset -= 8;
+ }
+ }
+
+ public boolean readBit() {
+ boolean returnValue = (data[byteOffset] & (0x80 >> bitOffset)) != 0;
+ skipBit();
+ return returnValue;
+ }
+
+ public int readBits(int numBits) {
+ if (numBits == 0) {
+ return 0;
+ }
+ int returnValue = 0;
+ bitOffset += numBits;
+ while (bitOffset > 8) {
+ bitOffset -= 8;
+ returnValue |= (data[byteOffset++] & 0xFF) << bitOffset;
+ }
+ returnValue |= (data[byteOffset] & 0xFF) >> (8 - bitOffset);
+ returnValue &= 0xFFFFFFFF >>> (32 - numBits);
+ if (bitOffset == 8) {
+ bitOffset = 0;
+ byteOffset++;
+ }
+ return returnValue;
+ }
+
+ public boolean canReadUEV() {
+ int initialByteOffset = byteOffset;
+ int initialBitOffset = bitOffset;
+ int leadingZeros = 0;
+ while (byteOffset < byteLimit && !readBit()) {
+ leadingZeros++;
+ }
+ boolean hitLimit = byteOffset == byteLimit;
+ byteOffset = initialByteOffset;
+ bitOffset = initialBitOffset;
+ return !hitLimit && canReadBits(leadingZeros * 2 + 1);
+ }
+
+ public int readUEV() {
+ int leadingZeros = 0;
+ while (!readBit()) {
+ leadingZeros++;
+ }
+ return (1 << leadingZeros) - 1 + (leadingZeros > 0 ? readBits(leadingZeros) : 0);
+ }
+
+ public boolean canReadBits(int numBits) {
+ int oldByteOffset = byteOffset;
+ int numBytes = numBits / 8;
+ int newByteOffset = byteOffset + numBytes;
+ int newBitOffset = bitOffset + numBits - (numBytes * 8);
+ if (newBitOffset > 7) {
+ newByteOffset++;
+ newBitOffset -= 8;
+ }
+ for (int i = oldByteOffset + 1; i <= newByteOffset && newByteOffset < byteLimit; i++) {
+ if (shouldSkipByte(i)) {
+ // Skip the byte and check three bytes ahead.
+ newByteOffset++;
+ i += 2;
+ }
+ }
+ return newByteOffset < byteLimit || (newByteOffset == byteLimit && newBitOffset == 0);
+ }
+
+ private boolean shouldSkipByte(int offset) {
+ return (2 <= offset && offset < byteLimit && data[offset] == (byte) 0x03 &&
+ data[offset - 2] == (byte) 0x00 && data[offset - 1] == (byte) 0x00);
+ }
+
+}
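The readUEV() helper above implements unsigned Exp-Golomb (ue(v)) decoding, which is how slice_type is coded in both AVC and HEVC slice headers: count leading zero bits, consume the terminating one bit, then read that many suffix bits. A self-contained sketch of the same rule follows (illustrative only, and omitting the emulation-prevention-byte handling that canReadBits()/shouldSkipByte() perform above).
```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Minimal bit reader over a byte buffer, most significant bit first.
class BitReader {
public:
    explicit BitReader(std::vector<uint8_t> data) : mData(std::move(data)) {}

    int readBit() {
        int bit = (mData[mPos / 8] >> (7 - (mPos % 8))) & 1;
        ++mPos;
        return bit;
    }

    // ue(v): leadingZeros zero bits, a one bit, then leadingZeros suffix bits.
    uint32_t readUEV() {
        int leadingZeros = 0;
        while (readBit() == 0) {
            ++leadingZeros;
        }
        uint32_t suffix = 0;
        for (int i = 0; i < leadingZeros; ++i) {
            suffix = (suffix << 1) | static_cast<uint32_t>(readBit());
        }
        return (1u << leadingZeros) - 1 + suffix;
    }

private:
    std::vector<uint8_t> mData;
    size_t mPos = 0;  // current bit position
};

int main() {
    // 0x28 = 0b00101000: prefix "00", stop bit "1", suffix "01" -> ue(v) = 4.
    BitReader reader({0x28});
    printf("decoded ue(v) = %u\n", reader.readUEV());  // prints 4
    return 0;
}
```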
diff --git a/media/tests/SampleVideoEncoder/app/src/main/res/layout/activity_main.xml b/media/tests/SampleVideoEncoder/app/src/main/res/layout/activity_main.xml
index 164e02a..017012d 100644
--- a/media/tests/SampleVideoEncoder/app/src/main/res/layout/activity_main.xml
+++ b/media/tests/SampleVideoEncoder/app/src/main/res/layout/activity_main.xml
@@ -124,4 +124,15 @@
</FrameLayout>
+ <TextView
+ android:id="@+id/textViewResults"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_marginTop="10dp"
+ android:fontFamily="sans-serif-medium"
+ android:textSize="18sp"
+ android:textStyle="normal"
+ app:layout_constraintStart_toStartOf="parent"
+ app:layout_constraintTop_toBottomOf="@+id/frameLayout2" />
+
</androidx.constraintlayout.widget.ConstraintLayout>
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index 26cdc3a..9e48c1f 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -37,6 +37,8 @@
],
static_libs: [
"libc_malloc_debug_backtrace",
+ "libbatterystats_aidl",
+ "libprocessinfoservice_aidl",
],
shared_libs: [
"libaudioclient_aidl_conversion",
@@ -44,12 +46,16 @@
"libbinder",
"libcutils",
"liblog",
+ "libpermission",
"libutils",
"libhidlbase",
"android.hardware.graphics.bufferqueue@1.0",
"android.hidl.token@1.0-utils",
"media_permission-aidl-cpp",
],
+ export_static_lib_headers: [
+ "libbatterystats_aidl",
+ ],
logtags: ["EventLogTags.logtags"],
diff --git a/media/utils/ProcessInfo.cpp b/media/utils/ProcessInfo.cpp
index 19225d3..e212794 100644
--- a/media/utils/ProcessInfo.cpp
+++ b/media/utils/ProcessInfo.cpp
@@ -21,9 +21,9 @@
#include <media/stagefright/ProcessInfo.h>
#include <binder/IPCThreadState.h>
-#include <binder/IProcessInfoService.h>
#include <binder/IServiceManager.h>
#include <private/android_filesystem_config.h>
+#include <processinfo/IProcessInfoService.h>
namespace android {
diff --git a/media/utils/fuzzers/Android.bp b/media/utils/fuzzers/Android.bp
index 187ef7c..b245834 100644
--- a/media/utils/fuzzers/Android.bp
+++ b/media/utils/fuzzers/Android.bp
@@ -10,6 +10,7 @@
cc_defaults {
name: "libmediautils_fuzzer_defaults",
shared_libs: [
+ "libbatterystats_aidl",
"libbinder",
"libcutils",
"liblog",
diff --git a/media/utils/fuzzers/SchedulingPolicyServiceFuzz.cpp b/media/utils/fuzzers/SchedulingPolicyServiceFuzz.cpp
index 4521853..130feee 100644
--- a/media/utils/fuzzers/SchedulingPolicyServiceFuzz.cpp
+++ b/media/utils/fuzzers/SchedulingPolicyServiceFuzz.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
#define LOG_TAG "BatteryNotifierFuzzer"
-#include <binder/IBatteryStats.h>
+#include <batterystats/IBatteryStats.h>
#include <binder/IServiceManager.h>
#include <utils/String16.h>
#include <android/log.h>
diff --git a/media/utils/include/mediautils/BatteryNotifier.h b/media/utils/include/mediautils/BatteryNotifier.h
index a4e42ad..3812d7a 100644
--- a/media/utils/include/mediautils/BatteryNotifier.h
+++ b/media/utils/include/mediautils/BatteryNotifier.h
@@ -17,7 +17,7 @@
#ifndef MEDIA_BATTERY_NOTIFIER_H
#define MEDIA_BATTERY_NOTIFIER_H
-#include <binder/IBatteryStats.h>
+#include <batterystats/IBatteryStats.h>
#include <utils/Singleton.h>
#include <utils/String8.h>
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index 2294c49..a7d47fb 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -74,6 +74,7 @@
"libmediautils",
"libnbaio",
"libnblog",
+ "libpermission",
"libpowermanager",
"libmediautils",
"libmemunreachable",
@@ -95,6 +96,7 @@
],
export_shared_lib_headers: [
+ "libpermission",
"media_permission-aidl-cpp",
],
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 7a89805..7cdac30 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -278,6 +278,21 @@
return NO_ERROR;
}
+status_t AudioFlinger::setVibratorInfos(
+ const std::vector<media::AudioVibratorInfo>& vibratorInfos) {
+ Mutex::Autolock _l(mLock);
+ mAudioVibratorInfos = vibratorInfos;
+ return NO_ERROR;
+}
+
+// getDefaultVibratorInfo_l must be called with AudioFlinger lock held.
+const media::AudioVibratorInfo* AudioFlinger::getDefaultVibratorInfo_l() {
+ if (mAudioVibratorInfos.empty()) {
+ return nullptr;
+ }
+ return &mAudioVibratorInfos.front();
+}
+
AudioFlinger::~AudioFlinger()
{
while (!mRecordThreads.isEmpty()) {
@@ -4122,7 +4137,8 @@
case TransactionCode::SET_MIC_MUTE:
case TransactionCode::SET_LOW_RAM_DEVICE:
case TransactionCode::SYSTEM_READY:
- case TransactionCode::SET_AUDIO_HAL_PIDS: {
+ case TransactionCode::SET_AUDIO_HAL_PIDS:
+ case TransactionCode::SET_VIBRATOR_INFOS: {
if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 1cfdffc..a980752 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -267,6 +267,8 @@
virtual status_t setAudioHalPids(const std::vector<pid_t>& pids);
+ virtual status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos);
+
status_t onPreTransact(TransactionCode code, const Parcel& data, uint32_t flags) override;
// end of IAudioFlinger interface
@@ -296,6 +298,8 @@
void updateDownStreamPatches_l(const struct audio_patch *patch,
const std::set<audio_io_handle_t> streams);
+ const media::AudioVibratorInfo* getDefaultVibratorInfo_l();
+
private:
// FIXME The 400 is temporarily too high until a leak of writers in media.log is fixed.
static const size_t kLogMemorySize = 400 * 1024;
@@ -971,6 +975,8 @@
SimpleLog mAppSetParameterLog;
SimpleLog mSystemSetParameterLog;
+ std::vector<media::AudioVibratorInfo> mAudioVibratorInfos;
+
static inline constexpr const char *mMetricsId = AMEDIAMETRICS_KEY_AUDIO_FLINGER;
};
diff --git a/services/audioflinger/AudioStreamOut.cpp b/services/audioflinger/AudioStreamOut.cpp
index 7e06096..d8565bd 100644
--- a/services/audioflinger/AudioStreamOut.cpp
+++ b/services/audioflinger/AudioStreamOut.cpp
@@ -173,22 +173,15 @@
return status;
}
-audio_format_t AudioStreamOut::getFormat() const
+audio_config_base_t AudioStreamOut::getAudioProperties() const
{
- audio_format_t result;
- return stream->getFormat(&result) == OK ? result : AUDIO_FORMAT_INVALID;
-}
-
-uint32_t AudioStreamOut::getSampleRate() const
-{
- uint32_t result;
- return stream->getSampleRate(&result) == OK ? result : 0;
-}
-
-audio_channel_mask_t AudioStreamOut::getChannelMask() const
-{
- audio_channel_mask_t result;
- return stream->getChannelMask(&result) == OK ? result : AUDIO_CHANNEL_INVALID;
+ audio_config_base_t result = AUDIO_CONFIG_BASE_INITIALIZER;
+ if (stream->getAudioProperties(&result) != OK) {
+ result.sample_rate = 0;
+ result.channel_mask = AUDIO_CHANNEL_INVALID;
+ result.format = AUDIO_FORMAT_INVALID;
+ }
+ return result;
}
int AudioStreamOut::flush()
diff --git a/services/audioflinger/AudioStreamOut.h b/services/audioflinger/AudioStreamOut.h
index 16fbcf2..565f43a 100644
--- a/services/audioflinger/AudioStreamOut.h
+++ b/services/audioflinger/AudioStreamOut.h
@@ -81,22 +81,14 @@
virtual size_t getFrameSize() const { return mHalFrameSize; }
/**
- * @return format from the perspective of the application and the AudioFlinger.
+ * @return the audio stream configuration (channel mask, format, sample rate):
+ * - channel mask from the perspective of the application and the AudioFlinger;
+ * the HAL is in stereo mode when playing multi-channel compressed audio over HDMI;
+ * - format from the perspective of the application and the AudioFlinger;
+ * - sample rate from the perspective of the application and the AudioFlinger;
+ * the HAL may be running at a higher sample rate if, for example, playing wrapped EAC3.
*/
- virtual audio_format_t getFormat() const;
-
- /**
- * The HAL may be running at a higher sample rate if, for example, playing wrapped EAC3.
- * @return sample rate from the perspective of the application and the AudioFlinger.
- */
- virtual uint32_t getSampleRate() const;
-
- /**
- * The HAL is in stereo mode when playing multi-channel compressed audio over HDMI.
- * @return channel mask from the perspective of the application and the AudioFlinger.
- */
- virtual audio_channel_mask_t getChannelMask() const;
-
+ virtual audio_config_base_t getAudioProperties() const;
virtual status_t flush();
virtual status_t standby();
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 031e0cf..d75b13b 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -1585,6 +1585,34 @@
return status;
}
+status_t AudioFlinger::EffectModule::setVibratorInfo(const media::AudioVibratorInfo* vibratorInfo)
+{
+ if (mStatus != NO_ERROR) {
+ return mStatus;
+ }
+ if (!isHapticGenerator()) {
+ ALOGW("Should not set vibrator info for effects that are not HapticGenerator");
+ return INVALID_OPERATION;
+ }
+
+ std::vector<uint8_t> request(
+ sizeof(effect_param_t) + sizeof(int32_t) + 2 * sizeof(float));
+ effect_param_t *param = (effect_param_t*) request.data();
+ param->psize = sizeof(int32_t);
+ param->vsize = 2 * sizeof(float);
+ *(int32_t*)param->data = HG_PARAM_VIBRATOR_INFO;
+ float* vibratorInfoPtr = reinterpret_cast<float*>(param->data + sizeof(int32_t));
+ vibratorInfoPtr[0] = vibratorInfo->resonantFrequency;
+ vibratorInfoPtr[1] = vibratorInfo->qFactor;
+ std::vector<uint8_t> response;
+ status_t status = command(EFFECT_CMD_SET_PARAM, request, sizeof(int32_t), &response);
+ if (status == NO_ERROR) {
+ LOG_ALWAYS_FATAL_IF(response.size() != sizeof(status_t));
+ status = *reinterpret_cast<const status_t*>(response.data());
+ }
+ return status;
+}
+
static std::string dumpInOutBuffer(bool isInput, const sp<EffectBufferHalInterface> &buffer) {
std::stringstream ss;
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 8e82d53..9da95bc 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -258,6 +258,7 @@
bool isHapticGenerator() const;
status_t setHapticIntensity(int id, int intensity);
+ status_t setVibratorInfo(const media::AudioVibratorInfo* vibratorInfo);
void dump(int fd, const Vector<String16>& args);
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 2e59baa..2436248 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -219,6 +219,10 @@
void flushAck();
bool isResumePending();
void resumeAck();
+ // For direct or offloaded tracks ensure that the pause state is acknowledged
+ // by the playback thread in case of an immediate flush.
+ bool isPausePending() const { return mPauseHwPending; }
+ void pauseAck();
void updateTrackFrameInfo(int64_t trackFramesReleased, int64_t sinkFramesWritten,
uint32_t halSampleRate, const ExtendedTimestamp &timeStamp);
@@ -314,6 +318,7 @@
sp<AudioTrackServerProxy> mAudioTrackServerProxy;
bool mResumeToStopping; // track was paused in stopping state.
bool mFlushHwPending; // track requests for thread flush
+ bool mPauseHwPending = false; // direct/offload track request for thread pause
audio_output_flags_t mFlags;
// If the last track change was notified to the client with readAndClearHasChanged
std::atomic_flag mChangeNotified = ATOMIC_FLAG_INIT;
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 7f91a54..997f24a 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1439,6 +1439,16 @@
effect->setMode(mAudioFlinger->getMode());
effect->setAudioSource(mAudioSource);
}
+ if (effect->isHapticGenerator()) {
+ // TODO(b/184194057): Use the vibrator information from the vibrator that will be used
+ // for the HapticGenerator.
+ const media::AudioVibratorInfo* defaultVibratorInfo =
+ mAudioFlinger->getDefaultVibratorInfo_l();
+ if (defaultVibratorInfo != nullptr) {
+ // Only set the vibrator info when it is a valid one.
+ effect->setVibratorInfo(defaultVibratorInfo);
+ }
+ }
// create effect handle and connect it to effect module
handle = new EffectHandle(effect, client, effectClient, priority);
lStatus = handle->initCheck();
@@ -2757,8 +2767,9 @@
void AudioFlinger::PlaybackThread::readOutputParameters_l()
{
// unfortunately we have no way of recovering from errors here, hence the LOG_ALWAYS_FATAL
- mSampleRate = mOutput->getSampleRate();
- mChannelMask = mOutput->getChannelMask();
+ const audio_config_base_t audioConfig = mOutput->getAudioProperties();
+ mSampleRate = audioConfig.sample_rate;
+ mChannelMask = audioConfig.channel_mask;
if (!audio_is_output_channel(mChannelMask)) {
LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
}
@@ -2771,11 +2782,11 @@
mBalance.setChannelMask(mChannelMask);
// Get actual HAL format.
- status_t result = mOutput->stream->getFormat(&mHALFormat);
+ status_t result = mOutput->stream->getAudioProperties(nullptr, nullptr, &mHALFormat);
LOG_ALWAYS_FATAL_IF(result != OK, "Error when retrieving output stream format: %d", result);
// Get format from the shim, which will be different than the HAL format
// if playing compressed audio over HDMI passthrough.
- mFormat = mOutput->getFormat();
+ mFormat = audioConfig.format;
if (!audio_is_valid_format(mFormat)) {
LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat);
}
@@ -5869,8 +5880,15 @@
sp<Track> l = mActiveTracks.getLatest();
bool last = l.get() == track;
- if (track->isPausing()) {
- track->setPaused();
+ if (track->isPausePending()) {
+ track->pauseAck();
+ // It is possible a track might have been flushed or stopped.
+ // Other operations such as flush pending might occur on the next prepare.
+ if (track->isPausing()) {
+ track->setPaused();
+ }
+ // Always perform pause, as an immediate flush will change
+ // the pause state to be no longer isPausing().
if (mHwSupportsPause && last && !mHwPaused) {
doHwPause = true;
mHwPaused = true;
@@ -6412,8 +6430,15 @@
continue;
}
- if (track->isPausing()) {
- track->setPaused();
+ if (track->isPausePending()) {
+ track->pauseAck();
+ // It is possible a track might have been flushed or stopped.
+ // Other operations such as flush pending might occur on the next prepare.
+ if (track->isPausing()) {
+ track->setPaused();
+ }
+ // Always perform pause if last, as an immediate flush will change
+ // the pause state to be no longer isPausing().
if (last) {
if (mHwSupportsPause && !mHwPaused) {
doHwPause = true;
@@ -8083,6 +8108,9 @@
{
ALOGV("RecordThread::getActiveMicrophones");
AutoMutex _l(mLock);
+ if (mInput == nullptr || mInput->stream == nullptr) {
+ return NO_INIT;
+ }
status_t status = mInput->stream->getActiveMicrophones(activeMicrophones);
return status;
}
@@ -8092,6 +8120,9 @@
{
ALOGV("setPreferredMicrophoneDirection(%d)", direction);
AutoMutex _l(mLock);
+ if (mInput == nullptr || mInput->stream == nullptr) {
+ return NO_INIT;
+ }
return mInput->stream->setPreferredMicrophoneDirection(direction);
}
@@ -8099,6 +8130,9 @@
{
ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
AutoMutex _l(mLock);
+ if (mInput == nullptr || mInput->stream == nullptr) {
+ return NO_INIT;
+ }
return mInput->stream->setPreferredMicrophoneFieldDimension(zoom);
}
@@ -8424,13 +8458,11 @@
}
if (reconfig) {
if (status == BAD_VALUE) {
- uint32_t sRate;
- audio_channel_mask_t channelMask;
- audio_format_t format;
- if (mInput->stream->getAudioProperties(&sRate, &channelMask, &format) == OK &&
- audio_is_linear_pcm(format) && audio_is_linear_pcm(reqFormat) &&
- sRate <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate) &&
- audio_channel_count_from_in_mask(channelMask) <= FCC_8) {
+ audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ if (mInput->stream->getAudioProperties(&config) == OK &&
+ audio_is_linear_pcm(config.format) && audio_is_linear_pcm(reqFormat) &&
+ config.sample_rate <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate) &&
+ audio_channel_count_from_in_mask(config.channel_mask) <= FCC_8) {
status = NO_ERROR;
}
}
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index db7528d..21651af 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1219,6 +1219,9 @@
mState = PAUSING;
ALOGV("%s(%d): ACTIVE/RESUMING => PAUSING on thread %d",
__func__, mId, (int)mThreadIoHandle);
+ if (isOffloadedOrDirect()) {
+ mPauseHwPending = true;
+ }
playbackThread->broadcast_l();
break;
@@ -1306,6 +1309,11 @@
mFlushHwPending = false;
}
+void AudioFlinger::PlaybackThread::Track::pauseAck()
+{
+ mPauseHwPending = false;
+}
+
void AudioFlinger::PlaybackThread::Track::reset()
{
// Do not reset twice to avoid discarding data written just after a flush and before
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 0537365..552919d 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -226,6 +226,8 @@
return AUDIO_DEVICE_OUT_SPEAKER_SAFE;
} else if (deviceTypes.count(AUDIO_DEVICE_OUT_HDMI_ARC) != 0) {
return AUDIO_DEVICE_OUT_HDMI_ARC;
+ } else if (deviceTypes.count(AUDIO_DEVICE_OUT_HDMI_EARC) != 0) {
+ return AUDIO_DEVICE_OUT_HDMI_EARC;
} else if (deviceTypes.count(AUDIO_DEVICE_OUT_AUX_LINE) != 0) {
return AUDIO_DEVICE_OUT_AUX_LINE;
} else if (deviceTypes.count(AUDIO_DEVICE_OUT_SPDIF) != 0) {
@@ -240,4 +242,4 @@
return a2dpDevices.empty() ? AUDIO_DEVICE_NONE : a2dpDevices[0];
}
}
-}
\ No newline at end of file
+}
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index c6bdb04..c2a20c6 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -77,6 +77,7 @@
sp<DeviceDescriptor> getDeviceAndMixForInputSource(audio_source_t inputSource,
const DeviceVector &availableDeviceTypes,
+ uid_t uid,
sp<AudioPolicyMix> *policyMix) const;
/**
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 05ec69e..20b4044 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -226,7 +226,9 @@
add(devices);
return size();
}
- return SortedVector::merge(devices);
+ ssize_t ret = SortedVector::merge(devices);
+ refreshTypes();
+ return ret;
}
/**
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index c024a85..b209a88 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -391,6 +391,7 @@
sp<DeviceDescriptor> AudioPolicyMixCollection::getDeviceAndMixForInputSource(
audio_source_t inputSource,
const DeviceVector &availDevices,
+ uid_t uid,
sp<AudioPolicyMix> *policyMix) const
{
for (size_t i = 0; i < size(); i++) {
@@ -402,7 +403,11 @@
if ((RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule &&
mix->mCriteria[j].mValue.mSource == inputSource) ||
(RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule &&
- mix->mCriteria[j].mValue.mSource != inputSource)) {
+ mix->mCriteria[j].mValue.mSource != inputSource) ||
+ (RULE_MATCH_UID == mix->mCriteria[j].mRule &&
+ mix->mCriteria[j].mValue.mUid == uid) ||
+ (RULE_EXCLUDE_UID == mix->mCriteria[j].mRule &&
+ mix->mCriteria[j].mValue.mUid != uid)) {
// assuming PolicyMix only for remote submix for input
// so mix->mDeviceType can only be AUDIO_DEVICE_OUT_REMOTE_SUBMIX
audio_devices_t device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 562c213..84ed656 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -253,6 +253,18 @@
// Children: ModulesTraits, VolumeTraits, SurroundSoundTraits (optional)
};
+// Deleter using free() for use with std::unique_ptr<>. See also UniqueCPtr<> below.
+struct FreeDelete {
+ // NOTE: Deleting a const object is valid but free() takes a non-const pointer.
+ void operator()(const void* ptr) const {
+ free(const_cast<void*>(ptr));
+ }
+};
+
+// Alias for std::unique_ptr<> that uses the C function free() to delete objects.
+template <typename T>
+using UniqueCPtr = std::unique_ptr<T, FreeDelete>;
+
template <class T>
constexpr void (*xmlDeleter)(T* t);
template <>
@@ -608,7 +620,7 @@
}
// Tokenize and Convert Sources name to port pointer
PolicyAudioPortVector sources;
- std::unique_ptr<char[]> sourcesLiteral{strndup(
+ UniqueCPtr<char> sourcesLiteral{strndup(
sourcesAttr.c_str(), strlen(sourcesAttr.c_str()))};
char *devTag = strtok(sourcesLiteral.get(), ",");
while (devTag != NULL) {
diff --git a/services/audiopolicy/engine/common/include/EngineBase.h b/services/audiopolicy/engine/common/include/EngineBase.h
index 9bef97c..0f8b0a5 100644
--- a/services/audiopolicy/engine/common/include/EngineBase.h
+++ b/services/audiopolicy/engine/common/include/EngineBase.h
@@ -170,11 +170,13 @@
status_t getMediaDevicesForRole(device_role_t role, const DeviceVector& availableDevices,
DeviceVector& devices) const;
+ void dumpCapturePresetDevicesRoleMap(String8 *dst, int spaces) const;
+
AudioPolicyManagerObserver *mApmObserver = nullptr;
ProductStrategyMap mProductStrategies;
- ProductStrategyPreferredRoutingMap mProductStrategyPreferredDevices;
- CapturePresetDevicesRoleMap mCapturePresetDevicesRole;
+ ProductStrategyDevicesRoleMap mProductStrategyDeviceRoleMap;
+ CapturePresetDevicesRoleMap mCapturePresetDevicesRoleMap;
VolumeGroupMap mVolumeGroups;
LastRemovableMediaDevices mLastRemovableMediaDevices;
audio_mode_t mPhoneState = AUDIO_MODE_NORMAL; /**< current phone state. */
diff --git a/services/audiopolicy/engine/common/include/ProductStrategy.h b/services/audiopolicy/engine/common/include/ProductStrategy.h
index 54625ea..2aa2f9a 100644
--- a/services/audiopolicy/engine/common/include/ProductStrategy.h
+++ b/services/audiopolicy/engine/common/include/ProductStrategy.h
@@ -18,20 +18,20 @@
#include "VolumeGroup.h"
-#include <system/audio.h>
-#include <utils/RefBase.h>
-#include <HandleGenerator.h>
-#include <string>
-#include <vector>
#include <map>
-#include <utils/Errors.h>
-#include <utils/String8.h>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <HandleGenerator.h>
#include <media/AudioAttributes.h>
#include <media/AudioContainers.h>
#include <media/AudioDeviceTypeAddr.h>
#include <media/AudioPolicy.h>
-
-#include <vector>
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
namespace android {
@@ -170,11 +170,12 @@
product_strategy_t mDefaultStrategy = PRODUCT_STRATEGY_NONE;
};
-class ProductStrategyPreferredRoutingMap : public std::map<product_strategy_t,
- AudioDeviceTypeAddrVector>
-{
-public:
- void dump(String8 *dst, int spaces = 0) const;
-};
+using ProductStrategyDevicesRoleMap =
+ std::map<std::pair<product_strategy_t, device_role_t>, AudioDeviceTypeAddrVector>;
+
+void dumpProductStrategyDevicesRoleMap(
+ const ProductStrategyDevicesRoleMap& productStrategyDeviceRoleMap,
+ String8 *dst,
+ int spaces);
} // namespace android
diff --git a/services/audiopolicy/engine/common/src/EngineBase.cpp b/services/audiopolicy/engine/common/src/EngineBase.cpp
index 37e4caa..150a9a8 100644
--- a/services/audiopolicy/engine/common/src/EngineBase.cpp
+++ b/services/audiopolicy/engine/common/src/EngineBase.cpp
@@ -17,6 +17,10 @@
#define LOG_TAG "APM::AudioPolicyEngine/Base"
//#define LOG_NDEBUG 0
+#include <functional>
+#include <string>
+#include <sys/stat.h>
+
#include "EngineBase.h"
#include "EngineDefaultConfig.h"
#include <TypeConverter.h>
@@ -148,10 +152,15 @@
});
return iter != end(volumeGroups);
};
+ auto fileExists = [](const char* path) {
+ struct stat fileStat;
+ return stat(path, &fileStat) == 0 && S_ISREG(fileStat.st_mode);
+ };
- auto result = engineConfig::parse();
+ auto result = fileExists(engineConfig::DEFAULT_PATH) ?
+ engineConfig::parse(engineConfig::DEFAULT_PATH) : engineConfig::ParsingResult{};
if (result.parsedConfig == nullptr) {
- ALOGW("%s: No configuration found, using default matching phone experience.", __FUNCTION__);
+ ALOGD("%s: No configuration found, using default matching phone experience.", __FUNCTION__);
engineConfig::Config config = gDefaultEngineConfig;
android::status_t ret = engineConfig::parseLegacyVolumes(config.volumeGroups);
result = {std::make_unique<engineConfig::Config>(config),
@@ -342,23 +351,33 @@
return NO_ERROR;
}
-status_t EngineBase::setDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role,
- const AudioDeviceTypeAddrVector &devices)
-{
- // verify strategy exists
- if (mProductStrategies.find(strategy) == mProductStrategies.end()) {
- ALOGE("%s invalid strategy %u", __func__, strategy);
+namespace {
+template <typename T>
+status_t setDevicesRoleForT(
+ std::map<std::pair<T, device_role_t>, AudioDeviceTypeAddrVector>& tDevicesRoleMap,
+ T t, device_role_t role, const AudioDeviceTypeAddrVector &devices,
+ const std::string& logStr, std::function<bool(T)> p) {
+ if (!p(t)) {
+ ALOGE("%s invalid %s %u", __func__, logStr.c_str(), t);
return BAD_VALUE;
}
switch (role) {
case DEVICE_ROLE_PREFERRED:
- mProductStrategyPreferredDevices[strategy] = devices;
- break;
- case DEVICE_ROLE_DISABLED:
- // TODO: support set devices role as disabled for strategy.
- ALOGI("%s no implemented for role as %d", __func__, role);
- break;
+ case DEVICE_ROLE_DISABLED: {
+ tDevicesRoleMap[std::make_pair(t, role)] = devices;
+ // The preferred devices and disabled devices are mutually exclusive. Once a device is added
+ // to one list, it must be removed from the other one.
+ const device_role_t roleToRemove = role == DEVICE_ROLE_PREFERRED ? DEVICE_ROLE_DISABLED
+ : DEVICE_ROLE_PREFERRED;
+ auto it = tDevicesRoleMap.find(std::make_pair(t, roleToRemove));
+ if (it != tDevicesRoleMap.end()) {
+ it->second = excludeDeviceTypeAddrsFrom(it->second, devices);
+ if (it->second.empty()) {
+ tDevicesRoleMap.erase(it);
+ }
+ }
+ } break;
case DEVICE_ROLE_NONE:
// Intentionally fall-through as it is no need to set device role as none for a strategy.
default:
@@ -368,28 +387,26 @@
return NO_ERROR;
}
-status_t EngineBase::removeDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role)
-{
- // verify strategy exists
- if (mProductStrategies.find(strategy) == mProductStrategies.end()) {
- ALOGE("%s invalid strategy %u", __func__, strategy);
+template <typename T>
+status_t removeAllDevicesRoleForT(
+ std::map<std::pair<T, device_role_t>, AudioDeviceTypeAddrVector>& tDevicesRoleMap,
+ T t, device_role_t role, const std::string& logStr, std::function<bool(T)> p) {
+ if (!p(t)) {
+ ALOGE("%s invalid %s %u", __func__, logStr.c_str(), t);
return BAD_VALUE;
}
switch (role) {
case DEVICE_ROLE_PREFERRED:
- if (mProductStrategyPreferredDevices.erase(strategy) == 0) {
- // no preferred device was set
+ case DEVICE_ROLE_DISABLED:
+ if (tDevicesRoleMap.erase(std::make_pair(t, role)) == 0) {
+ // no preferred/disabled device was set
return NAME_NOT_FOUND;
}
break;
- case DEVICE_ROLE_DISABLED:
- // TODO: support remove devices role as disabled for strategy.
- ALOGI("%s no implemented for role as %d", __func__, role);
- break;
case DEVICE_ROLE_NONE:
// Intentionally fall-through as it makes no sense to remove devices with
- // role as DEVICE_ROLE_NONE for a strategy
+ // role as DEVICE_ROLE_NONE
default:
ALOGE("%s invalid role %d", __func__, role);
return BAD_VALUE;
@@ -397,25 +414,26 @@
return NO_ERROR;
}
-status_t EngineBase::getDevicesForRoleAndStrategy(product_strategy_t strategy, device_role_t role,
- AudioDeviceTypeAddrVector &devices) const
-{
- // verify strategy exists
- if (mProductStrategies.find(strategy) == mProductStrategies.end()) {
- ALOGE("%s unknown strategy %u", __func__, strategy);
+template <typename T>
+status_t getDevicesRoleForT(
+ const std::map<std::pair<T, device_role_t>, AudioDeviceTypeAddrVector>& tDevicesRoleMap,
+ T t, device_role_t role, AudioDeviceTypeAddrVector &devices, const std::string& logStr,
+ std::function<bool(T)> p) {
+ if (!p(t)) {
+ ALOGE("%s invalid %s %u", __func__, logStr.c_str(), t);
return BAD_VALUE;
}
switch (role) {
- case DEVICE_ROLE_PREFERRED: {
- // preferred device for this strategy?
- auto devIt = mProductStrategyPreferredDevices.find(strategy);
- if (devIt == mProductStrategyPreferredDevices.end()) {
- ALOGV("%s no preferred device for strategy %u", __func__, strategy);
+ case DEVICE_ROLE_PREFERRED:
+ case DEVICE_ROLE_DISABLED: {
+ auto it = tDevicesRoleMap.find(std::make_pair(t, role));
+ if (it == tDevicesRoleMap.end()) {
+ ALOGV("%s no device as role %u for %s %u", __func__, role, logStr.c_str(), t);
return NAME_NOT_FOUND;
}
- devices = devIt->second;
+ devices = it->second;
} break;
case DEVICE_ROLE_NONE:
// Intentionally fall-through as the DEVICE_ROLE_NONE is never set
@@ -426,32 +444,45 @@
return NO_ERROR;
}
+} // namespace
+
+status_t EngineBase::setDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices)
+{
+ std::function<bool(product_strategy_t)> p = [this](product_strategy_t strategy) {
+ return mProductStrategies.find(strategy) != mProductStrategies.end();
+ };
+ return setDevicesRoleForT(
+ mProductStrategyDeviceRoleMap, strategy, role, devices, "strategy" /*logStr*/, p);
+}
+
+status_t EngineBase::removeDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role)
+{
+ std::function<bool(product_strategy_t)> p = [this](product_strategy_t strategy) {
+ return mProductStrategies.find(strategy) != mProductStrategies.end();
+ };
+ return removeAllDevicesRoleForT(
+ mProductStrategyDeviceRoleMap, strategy, role, "strategy" /*logStr*/, p);
+}
+
+status_t EngineBase::getDevicesForRoleAndStrategy(product_strategy_t strategy, device_role_t role,
+ AudioDeviceTypeAddrVector &devices) const
+{
+ std::function<bool(product_strategy_t)> p = [this](product_strategy_t strategy) {
+ return mProductStrategies.find(strategy) != mProductStrategies.end();
+ };
+ return getDevicesRoleForT(
+ mProductStrategyDeviceRoleMap, strategy, role, devices, "strategy" /*logStr*/, p);
+}
+
status_t EngineBase::setDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
const AudioDeviceTypeAddrVector &devices)
{
- // verify if the audio source is valid
- if (!audio_is_valid_audio_source(audioSource)) {
- ALOGE("%s unknown audio source %u", __func__, audioSource);
- }
-
- switch (role) {
- case DEVICE_ROLE_PREFERRED:
- mCapturePresetDevicesRole[audioSource][role] = devices;
- // When the devices are set as preferred devices, remove them from the disabled devices.
- doRemoveDevicesRoleForCapturePreset(
- audioSource, DEVICE_ROLE_DISABLED, devices, false /*forceMatched*/);
- break;
- case DEVICE_ROLE_DISABLED:
- // TODO: support setting devices role as disabled for capture preset.
- ALOGI("%s no implemented for role as %d", __func__, role);
- break;
- case DEVICE_ROLE_NONE:
- // Intentionally fall-through as it is no need to set device role as none
- default:
- ALOGE("%s invalid role %d", __func__, role);
- return BAD_VALUE;
- }
- return NO_ERROR;
+ std::function<bool(audio_source_t)> p = [](audio_source_t audioSource) {
+ return audio_is_valid_audio_source(audioSource);
+ };
+ return setDevicesRoleForT(
+ mCapturePresetDevicesRoleMap, audioSource, role, devices, "audio source" /*logStr*/, p);
}
status_t EngineBase::addDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
@@ -464,19 +495,20 @@
switch (role) {
case DEVICE_ROLE_PREFERRED:
- mCapturePresetDevicesRole[audioSource][role] = excludeDeviceTypeAddrsFrom(
- mCapturePresetDevicesRole[audioSource][role], devices);
- for (const auto& device : devices) {
- mCapturePresetDevicesRole[audioSource][role].push_back(device);
+ case DEVICE_ROLE_DISABLED: {
+ const auto audioSourceRole = std::make_pair(audioSource, role);
+ mCapturePresetDevicesRoleMap[audioSourceRole] = excludeDeviceTypeAddrsFrom(
+ mCapturePresetDevicesRoleMap[audioSourceRole], devices);
+ for (const auto &device : devices) {
+ mCapturePresetDevicesRoleMap[audioSourceRole].push_back(device);
}
// When the devices are set as preferred devices, remove them from the disabled devices.
doRemoveDevicesRoleForCapturePreset(
- audioSource, DEVICE_ROLE_DISABLED, devices, false /*forceMatched*/);
- break;
- case DEVICE_ROLE_DISABLED:
- // TODO: support setting devices role as disabled for capture preset.
- ALOGI("%s no implemented for role as %d", __func__, role);
- break;
+ audioSource,
+ role == DEVICE_ROLE_PREFERRED ? DEVICE_ROLE_DISABLED : DEVICE_ROLE_PREFERRED,
+ devices,
+ false /*forceMatched*/);
+ } break;
case DEVICE_ROLE_NONE:
// Intentionally fall-through as it is no need to set device role as none
default:
@@ -502,21 +534,22 @@
switch (role) {
case DEVICE_ROLE_PREFERRED:
case DEVICE_ROLE_DISABLED: {
- if (mCapturePresetDevicesRole.count(audioSource) == 0 ||
- mCapturePresetDevicesRole[audioSource].count(role) == 0) {
+ const auto audioSourceRole = std::make_pair(audioSource, role);
+ if (mCapturePresetDevicesRoleMap.find(audioSourceRole) ==
+ mCapturePresetDevicesRoleMap.end()) {
return NAME_NOT_FOUND;
}
AudioDeviceTypeAddrVector remainingDevices = excludeDeviceTypeAddrsFrom(
- mCapturePresetDevicesRole[audioSource][role], devices);
+ mCapturePresetDevicesRoleMap[audioSourceRole], devices);
if (forceMatched && remainingDevices.size() !=
- mCapturePresetDevicesRole[audioSource][role].size() - devices.size()) {
+ mCapturePresetDevicesRoleMap[audioSourceRole].size() - devices.size()) {
// There are some devices from `devicesToRemove` that are not shown in the cached record
return BAD_VALUE;
}
- mCapturePresetDevicesRole[audioSource][role] = remainingDevices;
- if (mCapturePresetDevicesRole[audioSource][role].empty()) {
+ mCapturePresetDevicesRoleMap[audioSourceRole] = remainingDevices;
+ if (mCapturePresetDevicesRoleMap[audioSourceRole].empty()) {
// Remove the role when device list is empty
- mCapturePresetDevicesRole[audioSource].erase(role);
+ mCapturePresetDevicesRoleMap.erase(audioSourceRole);
}
} break;
case DEVICE_ROLE_NONE:
@@ -532,63 +565,21 @@
status_t EngineBase::clearDevicesRoleForCapturePreset(audio_source_t audioSource,
device_role_t role)
{
- // verify if the audio source is valid
- if (!audio_is_valid_audio_source(audioSource)) {
- ALOGE("%s unknown audio source %u", __func__, audioSource);
- }
-
- switch (role) {
- case DEVICE_ROLE_PREFERRED:
- if (mCapturePresetDevicesRole.count(audioSource) == 0 ||
- mCapturePresetDevicesRole[audioSource].erase(role) == 0) {
- // no preferred device for the given audio source
- return NAME_NOT_FOUND;
- }
- break;
- case DEVICE_ROLE_DISABLED:
- // TODO: support remove devices role as disabled for strategy.
- ALOGI("%s no implemented for role as %d", __func__, role);
- break;
- case DEVICE_ROLE_NONE:
- // Intentionally fall-through as it makes no sense to remove devices with
- // role as DEVICE_ROLE_NONE for a strategy
- default:
- ALOGE("%s invalid role %d", __func__, role);
- return BAD_VALUE;
- }
- return NO_ERROR;
+ std::function<bool(audio_source_t)> p = [](audio_source_t audioSource) {
+ return audio_is_valid_audio_source(audioSource);
+ };
+ return removeAllDevicesRoleForT(
+ mCapturePresetDevicesRoleMap, audioSource, role, "audio source" /*logStr*/, p);
}
status_t EngineBase::getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
device_role_t role, AudioDeviceTypeAddrVector &devices) const
{
- // verify if the audio source is valid
- if (!audio_is_valid_audio_source(audioSource)) {
- ALOGE("%s unknown audio source %u", __func__, audioSource);
- return BAD_VALUE;
- }
-
- switch (role) {
- case DEVICE_ROLE_PREFERRED:
- case DEVICE_ROLE_DISABLED: {
- if (mCapturePresetDevicesRole.count(audioSource) == 0) {
- return NAME_NOT_FOUND;
- }
- auto devIt = mCapturePresetDevicesRole.at(audioSource).find(role);
- if (devIt == mCapturePresetDevicesRole.at(audioSource).end()) {
- ALOGV("%s no devices role(%d) for capture preset %u", __func__, role, audioSource);
- return NAME_NOT_FOUND;
- }
-
- devices = devIt->second;
- } break;
- case DEVICE_ROLE_NONE:
- // Intentionally fall-through as the DEVICE_ROLE_NONE is never set
- default:
- ALOGE("%s invalid role %d", __func__, role);
- return BAD_VALUE;
- }
- return NO_ERROR;
+ std::function<bool(audio_source_t)> p = [](audio_source_t audioSource) {
+ return audio_is_valid_audio_source(audioSource);
+ };
+ return getDevicesRoleForT(
+ mCapturePresetDevicesRoleMap, audioSource, role, devices, "audio source" /*logStr*/, p);
}
status_t EngineBase::getMediaDevicesForRole(device_role_t role,
@@ -630,10 +621,22 @@
return activeDevices;
}
+void EngineBase::dumpCapturePresetDevicesRoleMap(String8 *dst, int spaces) const
+{
+ dst->appendFormat("\n%*sDevice role per capture preset dump:", spaces, "");
+ for (const auto& [capturePresetRolePair, devices] : mCapturePresetDevicesRoleMap) {
+ dst->appendFormat("\n%*sCapture preset(%u) Device Role(%u) Devices(%s)", spaces + 2, "",
+ capturePresetRolePair.first, capturePresetRolePair.second,
+ dumpAudioDeviceTypeAddrVector(devices, true /*includeSensitiveInfo*/).c_str());
+ }
+ dst->appendFormat("\n");
+}
+
void EngineBase::dump(String8 *dst) const
{
mProductStrategies.dump(dst, 2);
- mProductStrategyPreferredDevices.dump(dst, 2);
+ dumpProductStrategyDevicesRoleMap(mProductStrategyDeviceRoleMap, dst, 2);
+ dumpCapturePresetDevicesRoleMap(dst, 2);
mVolumeGroups.dump(dst, 2);
}
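
The strategy-level overloads at the top of this file delegate to shared helpers (setDevicesRoleForT, removeAllDevicesRoleForT, getDevicesRoleForT) that take the pair-keyed role map plus a validity predicate for the key. Those helpers are defined outside this excerpt, so the following is only a simplified, standalone sketch of the pattern using standard-library types; the names and status codes are illustrative, not the actual Engine API.

    #include <functional>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    // Illustrative stand-ins for the Android types used above.
    using Key = int;                        // e.g. product_strategy_t or audio_source_t
    using Role = int;                       // e.g. device_role_t
    using Devices = std::vector<std::string>;
    using RoleMap = std::map<std::pair<Key, Role>, Devices>;

    enum Status { OK = 0, BAD_VALUE = -22, NAME_NOT_FOUND = -2 };

    // Shared "set" helper: reject unknown keys, then overwrite the device list
    // stored under the composite (key, role) entry.
    template <typename K>
    Status setDevicesRoleForT(RoleMap& roleMap, K key, Role role, const Devices& devices,
                              const std::function<bool(K)>& isValidKey) {
        if (!isValidKey(key)) return BAD_VALUE;
        roleMap[std::make_pair(key, role)] = devices;
        return OK;
    }

    // Shared "remove all" helper: reject unknown keys, then drop the entry if present.
    template <typename K>
    Status removeAllDevicesRoleForT(RoleMap& roleMap, K key, Role role,
                                    const std::function<bool(K)>& isValidKey) {
        if (!isValidKey(key)) return BAD_VALUE;
        return roleMap.erase(std::make_pair(key, role)) != 0 ? OK : NAME_NOT_FOUND;
    }

    int main() {
        RoleMap strategyRoles;
        auto knownStrategy = [](Key k) { return k >= 0 && k < 8; };
        setDevicesRoleForT<Key>(strategyRoles, 3, /*role=*/1, {"speaker"}, knownStrategy);
        removeAllDevicesRoleForT<Key>(strategyRoles, 3, /*role=*/1, knownStrategy);
        return 0;
    }
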
diff --git a/services/audiopolicy/engine/common/src/ProductStrategy.cpp b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
index d4cea5a..b3d144f 100644
--- a/services/audiopolicy/engine/common/src/ProductStrategy.cpp
+++ b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
@@ -320,14 +320,15 @@
}
}
-void ProductStrategyPreferredRoutingMap::dump(android::String8* dst, int spaces) const {
- dst->appendFormat("\n%*sPreferred devices per product strategy dump:", spaces, "");
- for (const auto& iter : *this) {
- dst->appendFormat("\n%*sStrategy %u %s",
- spaces + 2, "",
- (uint32_t) iter.first,
- dumpAudioDeviceTypeAddrVector(iter.second, true /*includeSensitiveInfo*/)
- .c_str());
+void dumpProductStrategyDevicesRoleMap(
+ const ProductStrategyDevicesRoleMap& productStrategyDeviceRoleMap,
+ String8 *dst,
+ int spaces) {
+ dst->appendFormat("\n%*sDevice role per product strategy dump:", spaces, "");
+ for (const auto& [strategyRolePair, devices] : productStrategyDeviceRoleMap) {
+ dst->appendFormat("\n%*sStrategy(%u) Device Role(%u) Devices(%s)", spaces + 2, "",
+ strategyRolePair.first, strategyRolePair.second,
+ dumpAudioDeviceTypeAddrVector(devices, true /*includeSensitiveInfo*/).c_str());
}
dst->appendFormat("\n");
}
diff --git a/services/audiopolicy/engine/config/src/EngineConfig.cpp b/services/audiopolicy/engine/config/src/EngineConfig.cpp
index 7cfef5b..1c86051 100644
--- a/services/audiopolicy/engine/config/src/EngineConfig.cpp
+++ b/services/audiopolicy/engine/config/src/EngineConfig.cpp
@@ -643,7 +643,11 @@
xmlDocPtr doc;
doc = xmlParseFile(path);
if (doc == NULL) {
- ALOGE("%s: Could not parse document %s", __FUNCTION__, path);
+ // It is OK not to find an engine config file at the default location
+ // as the caller will fall back to the hardcoded default configuration
+ if (strncmp(path, DEFAULT_PATH, strlen(DEFAULT_PATH))) {
+ ALOGW("%s: Could not parse document %s", __FUNCTION__, path);
+ }
return {nullptr, 0};
}
xmlNodePtr cur = xmlDocGetRootElement(doc);
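
The new guard only demotes the parse failure from an error to a warning when the requested path matches the compiled-in DEFAULT_PATH, on the assumption that a missing default file is normal and the caller falls back to the hardcoded configuration. A minimal standalone sketch of the same gate follows; the DEFAULT_PATH value here is a placeholder, not the engine's real default path.

    #include <cstdio>
    #include <cstring>

    // Placeholder path standing in for the engine's compiled-in default.
    static const char* const DEFAULT_PATH = "/vendor/etc/example_engine_configuration.xml";

    // A parse failure deserves a loud message only for an explicitly requested,
    // non-default path; the default file is allowed to be absent.
    bool shouldLogParseFailure(const char* path) {
        return strncmp(path, DEFAULT_PATH, strlen(DEFAULT_PATH)) != 0;
    }

    int main() {
        printf("%d\n", shouldLogParseFailure(DEFAULT_PATH));                  // 0: stay quiet
        printf("%d\n", shouldLogParseFailure("/data/local/tmp/engine.xml"));  // 1: warn
        return 0;
    }
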
diff --git a/services/audiopolicy/engine/interface/EngineInterface.h b/services/audiopolicy/engine/interface/EngineInterface.h
index f0a01d3..518f86e 100644
--- a/services/audiopolicy/engine/interface/EngineInterface.h
+++ b/services/audiopolicy/engine/interface/EngineInterface.h
@@ -16,6 +16,8 @@
#pragma once
+#include <utility>
+
#include <AudioPolicyManagerObserver.h>
#include <media/AudioProductStrategy.h>
#include <media/AudioVolumeGroup.h>
@@ -35,7 +37,7 @@
using StrategyVector = std::vector<product_strategy_t>;
using VolumeGroupVector = std::vector<volume_group_t>;
using CapturePresetDevicesRoleMap =
- std::map<audio_source_t, std::map<device_role_t, AudioDeviceTypeAddrVector>>;
+ std::map<std::pair<audio_source_t, device_role_t>, AudioDeviceTypeAddrVector>;
/**
* This interface is dedicated to the policy manager that a Policy Engine shall implement.
@@ -171,8 +173,10 @@
* @param[out] mix to be used if a mix has been installed for the given audio attributes.
* @return selected input device for the audio attributes, may be null if error.
*/
- virtual sp<DeviceDescriptor> getInputDeviceForAttributes(
- const audio_attributes_t &attr, sp<AudioPolicyMix> *mix = nullptr) const = 0;
+ virtual sp<DeviceDescriptor> getInputDeviceForAttributes(const audio_attributes_t &attr,
+ uid_t uid = 0,
+ sp<AudioPolicyMix> *mix = nullptr)
+ const = 0;
/**
* Get the legacy stream type for a given audio attributes.
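
CapturePresetDevicesRoleMap changes shape here: instead of a two-level map (source, then role, then devices), it is keyed directly by the (source, role) pair, which is what lets the EngineBase code above do a single find/erase on the composite key. A standalone illustration with plain ints in place of the enums:

    #include <cassert>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    using Devices = std::vector<std::string>;

    // Old shape: nested maps, two lookups (and two erase paths) to keep consistent.
    using NestedMap = std::map<int /*source*/, std::map<int /*role*/, Devices>>;

    // New shape: one map keyed by the (source, role) pair.
    using PairKeyedMap = std::map<std::pair<int, int>, Devices>;

    int main() {
        PairKeyedMap roles;
        const auto key = std::make_pair(/*source=*/1, /*role=*/2);

        roles[key] = {"bt_sco", "builtin_mic"};

        // One find() answers "is this (source, role) configured at all?".
        assert(roles.find(key) != roles.end());

        // Dropping the role is one erase on the composite key, instead of erasing
        // the inner role entry and then checking whether the outer entry is empty.
        roles[key].clear();
        if (roles[key].empty()) {
            roles.erase(key);
        }
        assert(roles.find(key) == roles.end());
        return 0;
    }
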
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index 6d42fcf..b0c376a 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -310,6 +310,7 @@
}
sp<DeviceDescriptor> Engine::getInputDeviceForAttributes(const audio_attributes_t &attr,
+ uid_t uid,
sp<AudioPolicyMix> *mix) const
{
const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
@@ -328,7 +329,10 @@
return device;
}
- device = policyMixes.getDeviceAndMixForInputSource(attr.source, availableInputDevices, mix);
+ device = policyMixes.getDeviceAndMixForInputSource(attr.source,
+ availableInputDevices,
+ uid,
+ mix);
if (device != nullptr) {
return device;
}
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.h b/services/audiopolicy/engineconfigurable/src/Engine.h
index 3b371d8..d8e2742 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.h
+++ b/services/audiopolicy/engineconfigurable/src/Engine.h
@@ -61,8 +61,10 @@
DeviceVector getOutputDevicesForStream(audio_stream_type_t stream,
bool fromCache = false) const override;
- sp<DeviceDescriptor> getInputDeviceForAttributes(
- const audio_attributes_t &attr, sp<AudioPolicyMix> *mix = nullptr) const override;
+ sp<DeviceDescriptor> getInputDeviceForAttributes(const audio_attributes_t &attr,
+ uid_t uid = 0,
+ sp<AudioPolicyMix> *mix = nullptr)
+ const override;
void updateDeviceSelectionCache() override;
diff --git a/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py b/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
index 5083b14..43b3dd2 100755
--- a/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
+++ b/services/audiopolicy/engineconfigurable/tools/buildCommonTypesStructureFile.py
@@ -172,12 +172,6 @@
logging.info("added stub input device mask")
# Transform input source in inclusive criterion
- shift = len(all_component_types['OutputDevicesMask'])
- if shift > 32:
- logging.critical("OutputDevicesMask incompatible with criterion representation on 32 bits")
- logging.info("EXIT ON FAILURE")
- exit(1)
-
for component_types in all_component_types:
values = ','.join('{}:{}'.format(value, key) for key, value in all_component_types[component_types].items())
logging.info("{}: <{}>".format(component_types, values))
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 1a903a6..edcdf5a 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -196,7 +196,7 @@
if (desc->isActive() && !audio_is_linear_pcm(desc->getFormat())) {
availableOutputDevices.remove(desc->devices().getDevicesFromTypes({
AUDIO_DEVICE_OUT_HDMI, AUDIO_DEVICE_OUT_SPDIF,
- AUDIO_DEVICE_OUT_HDMI_ARC}));
+ AUDIO_DEVICE_OUT_HDMI_ARC, AUDIO_DEVICE_OUT_HDMI_EARC}));
}
}
} break;
@@ -366,7 +366,9 @@
if (strategy == STRATEGY_MEDIA) {
// ARC, SPDIF and AUX_LINE can co-exist with others.
devices3 = availableOutputDevices.getDevicesFromTypes({
- AUDIO_DEVICE_OUT_HDMI_ARC, AUDIO_DEVICE_OUT_SPDIF, AUDIO_DEVICE_OUT_AUX_LINE});
+ AUDIO_DEVICE_OUT_HDMI_ARC, AUDIO_DEVICE_OUT_HDMI_EARC,
+ AUDIO_DEVICE_OUT_SPDIF, AUDIO_DEVICE_OUT_AUX_LINE,
+ });
}
devices2.add(devices3);
@@ -707,6 +709,7 @@
}
sp<DeviceDescriptor> Engine::getInputDeviceForAttributes(const audio_attributes_t &attr,
+ uid_t uid,
sp<AudioPolicyMix> *mix) const
{
const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
@@ -726,7 +729,10 @@
return device;
}
- device = policyMixes.getDeviceAndMixForInputSource(attr.source, availableInputDevices, mix);
+ device = policyMixes.getDeviceAndMixForInputSource(attr.source,
+ availableInputDevices,
+ uid,
+ mix);
if (device != nullptr) {
return device;
}
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
index 98f59d3..595e289 100644
--- a/services/audiopolicy/enginedefault/src/Engine.h
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -62,8 +62,10 @@
DeviceVector getOutputDevicesForStream(audio_stream_type_t stream,
bool fromCache = false) const override;
- sp<DeviceDescriptor> getInputDeviceForAttributes(
- const audio_attributes_t &attr, sp<AudioPolicyMix> *mix = nullptr) const override;
+ sp<DeviceDescriptor> getInputDeviceForAttributes(const audio_attributes_t &attr,
+ uid_t uid = 0,
+ sp<AudioPolicyMix> *mix = nullptr)
+ const override;
void updateDeviceSelectionCache() override;
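
Both engines insert the uid_t parameter in the middle of getInputDeviceForAttributes and default it to 0, so pre-existing call sites that pass only the attributes (and perhaps a mix pointer) keep compiling while newer callers can forward the client uid for uid-scoped policy-mix matching. A simplified standalone sketch of that signature evolution; the types are stand-ins, not the real engine interface:

    #include <cstdint>
    #include <cstdio>

    using UserId = std::uint32_t;   // stand-in for uid_t, kept distinct for portability
    struct Mix {};                  // stand-in for AudioPolicyMix
    struct Attributes { int source; };

    struct EngineInterface {
        // The new middle parameter carries a default, so old two-argument callers stay valid.
        virtual int getInputDeviceForAttributes(const Attributes& attr,
                                                UserId uid = 0,
                                                Mix** mix = nullptr) const = 0;
        virtual ~EngineInterface() = default;
    };

    struct Engine : EngineInterface {
        int getInputDeviceForAttributes(const Attributes& attr,
                                        UserId uid = 0,
                                        Mix** mix = nullptr) const override {
            (void)mix;
            // uid == 0 means "no specific client"; a real engine would use the uid to
            // match uid-scoped policy mixes before falling back to the source alone.
            return uid != 0 ? 100 + attr.source : attr.source;
        }
    };

    int main() {
        Engine engine;
        Attributes attr{6};
        printf("%d\n", engine.getInputDeviceForAttributes(attr));         // legacy call shape
        printf("%d\n", engine.getInputDeviceForAttributes(attr, 10042));  // uid-aware call
        return 0;
    }
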
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 2b9f8d7..ad359ec 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -284,7 +284,7 @@
setOutputDevices(desc, newDevices, force, 0);
}
if (!desc->isDuplicated() && desc->mProfile->hasDynamicAudioProfile() &&
- desc->devices() != activeMediaDevices &&
+ !activeMediaDevices.empty() && desc->devices() != activeMediaDevices &&
desc->supportsDevicesForPlayback(activeMediaDevices)) {
// Reopen the output to query the dynamic profiles when there is not active
// clients or all active clients will be rerouted. Otherwise, set the flag
@@ -2228,7 +2228,9 @@
} else {
// Prevent from storing invalid requested device id in clients
requestedDeviceId = AUDIO_PORT_HANDLE_NONE;
- device = mEngine->getInputDeviceForAttributes(attributes, &policyMix);
+ device = mEngine->getInputDeviceForAttributes(attributes, uid, &policyMix);
+ ALOGV_IF(device != nullptr, "%s found device type is 0x%X",
+ __FUNCTION__, device->type());
}
if (device == nullptr) {
ALOGW("getInputForAttr() could not find device for source %d", attributes.source);
@@ -2614,7 +2616,7 @@
bool close = false;
for (const auto& client : input->clientsList()) {
sp<DeviceDescriptor> device =
- mEngine->getInputDeviceForAttributes(client->attributes());
+ mEngine->getInputDeviceForAttributes(client->attributes(), client->uid());
if (!input->supportedDevices().contains(device)) {
close = true;
break;
@@ -5858,12 +5860,22 @@
// If we are not in call and no client is active on this input, this methods returns
// a null sp<>, causing the patch on the input stream to be released.
- audio_attributes_t attributes = inputDesc->getHighestPriorityAttributes();
+ audio_attributes_t attributes;
+ uid_t uid;
+ sp<RecordClientDescriptor> topClient = inputDesc->getHighestPriorityClient();
+ if (topClient != nullptr) {
+ attributes = topClient->attributes();
+ uid = topClient->uid();
+ } else {
+ attributes = { .source = AUDIO_SOURCE_DEFAULT };
+ uid = 0;
+ }
+
if (attributes.source == AUDIO_SOURCE_DEFAULT && isInCall()) {
attributes.source = AUDIO_SOURCE_VOICE_COMMUNICATION;
}
if (attributes.source != AUDIO_SOURCE_DEFAULT) {
- device = mEngine->getInputDeviceForAttributes(attributes);
+ device = mEngine->getInputDeviceForAttributes(attributes, uid);
}
return device;
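
The manager now asks the input descriptor for its highest-priority client so it can hand both the attributes and the client uid to the engine, and falls back to AUDIO_SOURCE_DEFAULT with uid 0 when nothing is active. A standalone sketch of that selection step; RecordClient and the descriptor are simplified stand-ins, not the real classes:

    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <vector>

    struct RecordClient {
        int source;          // stand-in for audio_source_t
        std::uint32_t uid;
        int priority;        // higher wins
    };

    struct InputDescriptor {
        std::vector<std::shared_ptr<RecordClient>> clients;

        // Returns the active client with the highest priority, or nullptr if none.
        std::shared_ptr<RecordClient> getHighestPriorityClient() const {
            std::shared_ptr<RecordClient> top;
            for (const auto& c : clients) {
                if (!top || c->priority > top->priority) top = c;
            }
            return top;
        }
    };

    int main() {
        constexpr int AUDIO_SOURCE_DEFAULT = 0;
        InputDescriptor input;
        input.clients.push_back(std::make_shared<RecordClient>(RecordClient{1, 10007, 5}));
        input.clients.push_back(std::make_shared<RecordClient>(RecordClient{6, 10042, 9}));

        int source = AUDIO_SOURCE_DEFAULT;
        std::uint32_t uid = 0;
        if (auto top = input.getHighestPriorityClient()) {
            source = top->source;  // route for the top client...
            uid = top->uid;        // ...and remember who it is for uid-scoped mixes
        }
        printf("source=%d uid=%u\n", source, uid);
        return 0;
    }
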
diff --git a/services/audiopolicy/service/Android.bp b/services/audiopolicy/service/Android.bp
index d5ba756..14be671 100644
--- a/services/audiopolicy/service/Android.bp
+++ b/services/audiopolicy/service/Android.bp
@@ -23,6 +23,7 @@
],
shared_libs: [
+ "libactivitymanager_aidl",
"libaudioclient",
"libaudioclient_aidl_conversion",
"libaudiofoundation",
@@ -67,6 +68,7 @@
],
export_shared_lib_headers: [
+ "libactivitymanager_aidl",
"libsensorprivacy",
"media_permission-aidl-cpp",
],
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 639fa58..551013f 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -197,6 +197,7 @@
mAudioPolicyManager->setPhoneState(state);
mPhoneState = state;
mPhoneStateOwnerUid = uid;
+ updateUidStates_l();
return Status::ok();
}
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 4ffa9cc..b5eb98f 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -537,35 +537,34 @@
{
// Go over all active clients and allow capture (does not force silence) in the
// following cases:
-// The client source is virtual (remote submix, call audio TX or RX...)
-// OR The user the client is running in has microphone sensor privacy disabled
-// AND The client is the assistant
-// AND an accessibility service is on TOP or a RTT call is active
-// AND the source is VOICE_RECOGNITION or HOTWORD
-// OR uses VOICE_RECOGNITION AND is on TOP
-// OR uses HOTWORD
-// AND there is no active privacy sensitive capture or call
-// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
-// OR The client is an accessibility service
-// AND Is on TOP
-// AND the source is VOICE_RECOGNITION or HOTWORD
-// OR The assistant is not on TOP
-// AND there is no active privacy sensitive capture or call
-// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
-// AND is on TOP
+// The client is the assistant
+// AND an accessibility service is on TOP or a RTT call is active
// AND the source is VOICE_RECOGNITION or HOTWORD
-// OR the client source is HOTWORD
-// AND is on TOP
-// OR all active clients are using HOTWORD source
-// AND no call is active
-// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
-// OR the client is the current InputMethodService
-// AND a RTT call is active AND the source is VOICE_RECOGNITION
-// OR Any client
-// AND The assistant is not on TOP
-// AND is on TOP or latest started
+// OR uses VOICE_RECOGNITION AND is on TOP
+// OR uses HOTWORD
+// AND there is no active privacy sensitive capture or call
+// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+// OR The client is an accessibility service
+// AND Is on TOP
+// AND the source is VOICE_RECOGNITION or HOTWORD
+// OR The assistant is not on TOP
// AND there is no active privacy sensitive capture or call
// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+// AND is on TOP
+// AND the source is VOICE_RECOGNITION or HOTWORD
+// OR the client source is virtual (remote submix, call audio TX or RX...)
+// OR the client source is HOTWORD
+// AND is on TOP
+// OR all active clients are using HOTWORD source
+// AND no call is active
+// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+// OR the client is the current InputMethodService
+// AND a RTT call is active AND the source is VOICE_RECOGNITION
+// OR Any client
+// AND The assistant is not on TOP
+// AND is on TOP or latest started
+// AND there is no active privacy sensitive capture or call
+// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
sp<AudioRecordClient> topActive;
@@ -596,8 +595,7 @@
for (size_t i =0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
uid_t currentUid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(current->identity.uid));
- if (!current->active || (!isVirtualSource(current->attributes.source)
- && isUserSensorPrivacyEnabledForUid(currentUid))) {
+ if (!current->active) {
continue;
}
@@ -734,9 +732,6 @@
if (isVirtualSource(source)) {
// Allow capture for virtual (remote submix, call audio TX or RX...) sources
allowCapture = true;
- } else if (isUserSensorPrivacyEnabledForUid(currentUid)) {
- // If sensor privacy is enabled, don't allow capture
- allowCapture = false;
} else if (mUidPolicy->isAssistantUid(currentUid)) {
// For assistant allow capture if:
// An accessibility service is on TOP or a RTT call is active
@@ -1145,16 +1140,6 @@
return NO_INIT;
}
-bool AudioPolicyService::isUserSensorPrivacyEnabledForUid(uid_t uid) {
- userid_t userId = multiuser_get_user_id(uid);
- if (mMicrophoneSensorPrivacyPolicies.find(userId) == mMicrophoneSensorPrivacyPolicies.end()) {
- sp<SensorPrivacyPolicy> userPolicy = new SensorPrivacyPolicy(this);
- userPolicy->registerSelfForMicrophoneOnly(userId);
- mMicrophoneSensorPrivacyPolicies[userId] = userPolicy;
- }
- return mMicrophoneSensorPrivacyPolicies[userId]->isSensorPrivacyEnabled();
-}
-
status_t AudioPolicyService::printHelp(int out) {
return dprintf(out, "Audio policy service commands:\n"
" get-uid-state <PACKAGE> [--user USER_ID] gets the uid state\n"
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 145ba06..00d9670 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -346,8 +346,6 @@
status_t validateUsage(audio_usage_t usage);
status_t validateUsage(audio_usage_t usage, const media::permission::Identity& identity);
- bool isUserSensorPrivacyEnabledForUid(uid_t uid);
-
void updateUidStates();
void updateUidStates_l() REQUIRES(mLock);
@@ -908,8 +906,6 @@
void *mLibraryHandle = nullptr;
CreateAudioPolicyManagerInstance mCreateAudioPolicyManager;
DestroyAudioPolicyManagerInstance mDestroyAudioPolicyManager;
-
- std::map<userid_t, sp<SensorPrivacyPolicy>> mMicrophoneSensorPrivacyPolicies;
};
} // namespace android
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index a0b35a8..07c889b 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -113,6 +113,8 @@
"libutilscallstack",
"libutils",
"libbinder",
+ "libactivitymanager_aidl",
+ "libpermission",
"libcutils",
"libmedia",
"libmediautils",
@@ -154,12 +156,15 @@
],
static_libs: [
+ "libprocessinfoservice_aidl",
"libbinderthreadstateutils",
"media_permission-aidl-cpp",
],
export_shared_lib_headers: [
"libbinder",
+ "libactivitymanager_aidl",
+ "libpermission",
"libcamera_client",
"libfmq",
"libsensorprivacy",
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 6ac61cb..74d44cc 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -29,6 +29,7 @@
#include <inttypes.h>
#include <pthread.h>
+#include <android/content/pm/IPackageManagerNative.h>
#include <android/hardware/ICamera.h>
#include <android/hardware/ICameraClient.h>
@@ -41,7 +42,6 @@
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <binder/PermissionController.h>
-#include <binder/ProcessInfoService.h>
#include <binder/IResultReceiver.h>
#include <binderthreadstate/CallerUtils.h>
#include <cutils/atomic.h>
@@ -57,6 +57,7 @@
#include <media/IMediaHTTPService.h>
#include <media/mediaplayer.h>
#include <mediautils/BatteryNotifier.h>
+#include <processinfo/ProcessInfoService.h>
#include <utils/Errors.h>
#include <utils/Log.h>
#include <utils/String16.h>
@@ -225,10 +226,16 @@
return OK;
}
-void CameraService::broadcastTorchModeStatus(const String8& cameraId, TorchModeStatus status) {
+void CameraService::broadcastTorchModeStatus(const String8& cameraId, TorchModeStatus status,
+ SystemCameraKind systemCameraKind) {
Mutex::Autolock lock(mStatusListenerLock);
-
for (auto& i : mListenerList) {
+ if (shouldSkipStatusUpdates(systemCameraKind, i->isVendorListener(), i->getListenerPid(),
+ i->getListenerUid())) {
+ ALOGV("Skipping torch callback for system-only camera device %s",
+ cameraId.c_str());
+ continue;
+ }
i->getListener()->onTorchStatusChanged(mapToInterface(status), String16{cameraId});
}
}
@@ -237,10 +244,6 @@
VendorTagDescriptor::clearGlobalVendorTagDescriptor();
mUidPolicy->unregisterSelf();
mSensorPrivacyPolicy->unregisterSelf();
-
- for (auto const& [_, policy] : mCameraSensorPrivacyPolicies) {
- policy->unregisterSelf();
- }
}
void CameraService::onNewProviderRegistered() {
@@ -318,7 +321,7 @@
Mutex::Autolock al(mTorchStatusMutex);
mTorchStatusMap.add(id, TorchModeStatus::AVAILABLE_OFF);
- broadcastTorchModeStatus(id, TorchModeStatus::AVAILABLE_OFF);
+ broadcastTorchModeStatus(id, TorchModeStatus::AVAILABLE_OFF, deviceKind);
}
updateCameraNumAndIds();
@@ -479,12 +482,19 @@
void CameraService::onTorchStatusChanged(const String8& cameraId,
TorchModeStatus newStatus) {
+ SystemCameraKind systemCameraKind = SystemCameraKind::PUBLIC;
+ status_t res = getSystemCameraKind(cameraId, &systemCameraKind);
+ if (res != OK) {
+ ALOGE("%s: Could not get system camera kind for camera id %s", __FUNCTION__,
+ cameraId.string());
+ return;
+ }
Mutex::Autolock al(mTorchStatusMutex);
- onTorchStatusChangedLocked(cameraId, newStatus);
+ onTorchStatusChangedLocked(cameraId, newStatus, systemCameraKind);
}
void CameraService::onTorchStatusChangedLocked(const String8& cameraId,
- TorchModeStatus newStatus) {
+ TorchModeStatus newStatus, SystemCameraKind systemCameraKind) {
ALOGI("%s: Torch status changed for cameraId=%s, newStatus=%d",
__FUNCTION__, cameraId.string(), newStatus);
@@ -533,8 +543,7 @@
}
}
}
-
- broadcastTorchModeStatus(cameraId, newStatus);
+ broadcastTorchModeStatus(cameraId, newStatus, systemCameraKind);
}
static bool hasPermissionsForSystemCamera(int callingPid, int callingUid) {
@@ -793,8 +802,8 @@
Status CameraService::makeClient(const sp<CameraService>& cameraService,
const sp<IInterface>& cameraCb, const String16& packageName,
const std::optional<String16>& featureId, const String8& cameraId,
- int api1CameraId, int facing, int clientPid, uid_t clientUid, int servicePid,
- int deviceVersion, apiLevel effectiveApiLevel,
+ int api1CameraId, int facing, int sensorOrientation, int clientPid, uid_t clientUid,
+ int servicePid, int deviceVersion, apiLevel effectiveApiLevel,
/*out*/sp<BasicClient>* client) {
// Create CameraClient based on device version reported by the HAL.
@@ -817,13 +826,13 @@
sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
*client = new Camera2Client(cameraService, tmp, packageName, featureId,
cameraId, api1CameraId,
- facing, clientPid, clientUid,
+ facing, sensorOrientation, clientPid, clientUid,
servicePid);
} else { // Camera2 API route
sp<hardware::camera2::ICameraDeviceCallbacks> tmp =
static_cast<hardware::camera2::ICameraDeviceCallbacks*>(cameraCb.get());
*client = new CameraDeviceClient(cameraService, tmp, packageName, featureId,
- cameraId, facing, clientPid, clientUid, servicePid);
+ cameraId, facing, sensorOrientation, clientPid, clientUid, servicePid);
}
break;
default:
@@ -1638,7 +1647,7 @@
sp<BasicClient> tmp = nullptr;
if(!(ret = makeClient(this, cameraCb, clientPackageName, clientFeatureId,
- cameraId, api1CameraId, facing,
+ cameraId, api1CameraId, facing, orientation,
clientPid, clientUid, getpid(),
deviceVersion, effectiveApiLevel,
/*out*/&tmp)).isOk()) {
@@ -1704,8 +1713,9 @@
// Set camera muting behavior
if (client->supportsCameraMute()) {
- client->setCameraMute(mOverrideCameraMuteMode ||
- isUserSensorPrivacyEnabledForUid(clientUid));
+ bool isCameraPrivacyEnabled =
+ mSensorPrivacyPolicy->isCameraPrivacyEnabled(multiuser_get_user_id(clientUid));
+ client->setCameraMute(mOverrideCameraMuteMode || isCameraPrivacyEnabled);
}
if (shimUpdateOnly) {
@@ -1817,6 +1827,10 @@
String8 id = String8(cameraId.string());
int uid = CameraThreadState::getCallingUid();
+ if (shouldRejectSystemCameraConnection(id)) {
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT, "Unable to set torch mode"
+ " for system only device %s: ", id.string());
+ }
// verify id is valid.
auto state = getCameraState(id);
if (state == nullptr) {
@@ -2019,7 +2033,50 @@
return Status::ok();
}
- Status CameraService::getConcurrentCameraIds(
+Status CameraService::notifyDisplayConfigurationChange() {
+ ATRACE_CALL();
+ const int callingPid = CameraThreadState::getCallingPid();
+ const int selfPid = getpid();
+
+ // Permission checks
+ if (callingPid != selfPid) {
+ // Ensure we're being called by system_server, or similar process with
+ // permissions to notify the camera service about system events
+ if (!checkCallingPermission(sCameraSendSystemEventsPermission)) {
+ const int uid = CameraThreadState::getCallingUid();
+ ALOGE("Permission Denial: cannot send updates to camera service about orientation"
+ " changes from pid=%d, uid=%d", callingPid, uid);
+ return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+ "No permission to send updates to camera service about orientation"
+ " changes from pid=%d, uid=%d", callingPid, uid);
+ }
+ }
+
+ Mutex::Autolock lock(mServiceLock);
+
+ // Don't do anything if rotate-and-crop override via cmd is active
+ if (mOverrideRotateAndCropMode != ANDROID_SCALER_ROTATE_AND_CROP_AUTO) return Status::ok();
+
+ const auto clients = mActiveClientManager.getAll();
+ for (auto& current : clients) {
+ if (current != nullptr) {
+ const auto basicClient = current->getValue();
+ if (basicClient.get() != nullptr) {
+ if (CameraServiceProxyWrapper::isRotateAndCropOverrideNeeded(
+ basicClient->getPackageName(), basicClient->getCameraOrientation(),
+ basicClient->getCameraFacing())) {
+ basicClient->setRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_90);
+ } else {
+ basicClient->setRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_NONE);
+ }
+ }
+ }
+ }
+
+ return Status::ok();
+}
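
For completeness, a hypothetical caller-side sketch of the new binder entry point is shown below; it is not part of this patch. It assumes the camera service is published under its usual "media.camera" name and that the calling process holds android.permission.CAMERA_SEND_SYSTEM_EVENTS, otherwise the call above returns a permission error.

    // Hypothetical caller-side sketch; not part of this patch.
    #define LOG_TAG "DisplayConfigNotifier"

    #include <android/hardware/ICameraService.h>
    #include <binder/IServiceManager.h>
    #include <utils/Log.h>
    #include <utils/String16.h>

    using namespace android;

    void notifyCameraServiceOfDisplayChange() {
        // Assumes the camera service is registered as "media.camera".
        sp<IBinder> binder = defaultServiceManager()->getService(String16("media.camera"));
        if (binder == nullptr) {
            ALOGW("Camera service not available");
            return;
        }
        sp<hardware::ICameraService> cameraService =
                interface_cast<hardware::ICameraService>(binder);
        // Oneway call: the service re-evaluates rotate-and-crop for all active clients.
        binder::Status status = cameraService->notifyDisplayConfigurationChange();
        if (!status.isOk()) {
            ALOGW("notifyDisplayConfigurationChange failed: %s", status.toString8().c_str());
        }
    }
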
+
+Status CameraService::getConcurrentCameraIds(
std::vector<ConcurrentCameraIdCombination>* concurrentCameraIds) {
ATRACE_CALL();
if (!concurrentCameraIds) {
@@ -2178,6 +2235,11 @@
return shouldSkipStatusUpdates(deviceKind, isVendorListener, clientPid,
clientUid);}), cameraStatuses->end());
+ // cameraStatuses will have non-eligible camera ids removed.
+ std::set<String16> idsChosenForCallback;
+ for (const auto &s : *cameraStatuses) {
+ idsChosenForCallback.insert(String16(s.cameraId));
+ }
/*
* Immediately signal current torch status to this listener only
@@ -2187,7 +2249,11 @@
Mutex::Autolock al(mTorchStatusMutex);
for (size_t i = 0; i < mTorchStatusMap.size(); i++ ) {
String16 id = String16(mTorchStatusMap.keyAt(i).string());
- listener->onTorchStatusChanged(mapToInterface(mTorchStatusMap.valueAt(i)), id);
+ // The camera id is visible to the client. Fine to send torch
+ // callback.
+ if (idsChosenForCallback.find(id) != idsChosenForCallback.end()) {
+ listener->onTorchStatusChanged(mapToInterface(mTorchStatusMap.valueAt(i)), id);
+ }
}
}
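
The listener registration path now reuses the already-filtered cameraStatuses list to build the set of ids this client may see, and replays cached torch state only for those ids, so a non-privileged listener cannot learn about system-only cameras through torch callbacks. A simplified standalone sketch of that two-step filter (ids and statuses are plain strings and ints here):

    #include <cstdio>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    struct CameraStatus { std::string cameraId; int status; };

    int main() {
        // Step 1: statuses that survived the eligibility filter for this listener
        // (camera "2" is assumed to have been removed as system-only).
        std::vector<CameraStatus> cameraStatuses = {{"0", 1}, {"1", 1}};

        std::set<std::string> idsChosenForCallback;
        for (const auto& s : cameraStatuses) {
            idsChosenForCallback.insert(s.cameraId);
        }

        // Step 2: replay cached torch state only for ids the listener may see.
        std::map<std::string, int> torchStatusMap = {{"0", 0}, {"1", 1}, {"2", 1}};
        for (const auto& [id, torchStatus] : torchStatusMap) {
            if (idsChosenForCallback.find(id) == idsChosenForCallback.end()) {
                continue;  // hidden camera: skip the torch callback
            }
            printf("onTorchStatusChanged(id=%s, status=%d)\n", id.c_str(), torchStatus);
        }
        return 0;
    }
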
@@ -2675,13 +2741,13 @@
const String16& clientPackageName,
const std::optional<String16>& clientFeatureId,
const String8& cameraIdStr,
- int api1CameraId, int cameraFacing,
+ int api1CameraId, int cameraFacing, int sensorOrientation,
int clientPid, uid_t clientUid,
int servicePid) :
CameraService::BasicClient(cameraService,
IInterface::asBinder(cameraClient),
clientPackageName, clientFeatureId,
- cameraIdStr, cameraFacing,
+ cameraIdStr, cameraFacing, sensorOrientation,
clientPid, clientUid,
servicePid),
mCameraId(api1CameraId)
@@ -2711,10 +2777,10 @@
CameraService::BasicClient::BasicClient(const sp<CameraService>& cameraService,
const sp<IBinder>& remoteCallback,
const String16& clientPackageName, const std::optional<String16>& clientFeatureId,
- const String8& cameraIdStr, int cameraFacing,
+ const String8& cameraIdStr, int cameraFacing, int sensorOrientation,
int clientPid, uid_t clientUid,
int servicePid):
- mCameraIdStr(cameraIdStr), mCameraFacing(cameraFacing),
+ mCameraIdStr(cameraIdStr), mCameraFacing(cameraFacing), mOrientation(sensorOrientation),
mClientPackageName(clientPackageName), mClientFeatureId(clientFeatureId),
mClientPid(clientPid), mClientUid(clientUid),
mServicePid(servicePid),
@@ -2811,6 +2877,13 @@
return mClientPackageName;
}
+int CameraService::BasicClient::getCameraFacing() const {
+ return mCameraFacing;
+}
+
+int CameraService::BasicClient::getCameraOrientation() const {
+ return mOrientation;
+}
int CameraService::BasicClient::getClientPid() const {
return mClientPid;
@@ -2880,10 +2953,17 @@
// If the calling Uid is trusted (a native service), the AppOpsManager could
// return MODE_IGNORED. Do not treat such case as error.
if (!mUidIsTrusted && res == AppOpsManager::MODE_IGNORED) {
- ALOGI("Camera %s: Access for \"%s\" has been restricted",
- mCameraIdStr.string(), String8(mClientPackageName).string());
- // Return the same error as for device policy manager rejection
- return -EACCES;
+ bool isUidActive = sCameraService->mUidPolicy->isUidActive(mClientUid,
+ mClientPackageName);
+ bool isCameraPrivacyEnabled =
+ sCameraService->mSensorPrivacyPolicy->isCameraPrivacyEnabled(
+ multiuser_get_user_id(mClientUid));
+ if (!isUidActive || !isCameraPrivacyEnabled) {
+ ALOGI("Camera %s: Access for \"%s\" has been restricted",
+ mCameraIdStr.string(), String8(mClientPackageName).string());
+ // Return the same error as for device policy manager rejection
+ return -EACCES;
+ }
}
}
@@ -2961,15 +3041,22 @@
block();
} else if (res == AppOpsManager::MODE_IGNORED) {
bool isUidActive = sCameraService->mUidPolicy->isUidActive(mClientUid, mClientPackageName);
+ bool isCameraPrivacyEnabled =
+ sCameraService->mSensorPrivacyPolicy->isCameraPrivacyEnabled(
+ multiuser_get_user_id(mClientUid));
ALOGI("Camera %s: Access for \"%s\" has been restricted, isUidTrusted %d, isUidActive %d",
mCameraIdStr.string(), String8(mClientPackageName).string(),
mUidIsTrusted, isUidActive);
// If the calling Uid is trusted (a native service), or the client Uid is active (WAR for
// b/175320666), the AppOpsManager could return MODE_IGNORED. Do not treat such cases as
// error.
- if (!mUidIsTrusted && !isUidActive) {
+ if (!mUidIsTrusted && isUidActive && isCameraPrivacyEnabled) {
+ setCameraMute(true);
+ } else if (!mUidIsTrusted && !isUidActive) {
block();
}
+ } else if (res == AppOpsManager::MODE_ALLOWED) {
+ setCameraMute(sCameraService->mOverrideCameraMuteMode);
}
}
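
The op-changed path now distinguishes four outcomes instead of two: trusted native uids are left untouched, an active uid with the camera privacy toggle enabled is muted rather than blocked, an inactive uid is still blocked, and MODE_ALLOWED simply re-applies the shell mute override. A condensed standalone restatement of that decision with plain boolean inputs (illustrative only, not the service's code):

    #include <cstdio>

    enum class Action { None, Mute, Block, ApplyOverrideMute };

    // MODE_IGNORED either mutes (active uid, privacy toggle on) or blocks (inactive
    // uid); MODE_ALLOWED restores the shell override; trusted uids are left alone.
    Action onCameraOpChanged(bool modeIgnored, bool modeAllowed, bool uidTrusted,
                             bool uidActive, bool cameraPrivacyEnabled) {
        if (modeIgnored) {
            if (!uidTrusted && uidActive && cameraPrivacyEnabled) return Action::Mute;
            if (!uidTrusted && !uidActive) return Action::Block;
            return Action::None;
        }
        if (modeAllowed) return Action::ApplyOverrideMute;
        return Action::None;
    }

    int main() {
        auto name = [](Action a) {
            switch (a) {
                case Action::Mute: return "mute";
                case Action::Block: return "block";
                case Action::ApplyOverrideMute: return "apply-override-mute";
                default: return "none";
            }
        };
        printf("%s\n", name(onCameraOpChanged(true, false, false, true, true)));    // mute
        printf("%s\n", name(onCameraOpChanged(true, false, false, false, false)));  // block
        printf("%s\n", name(onCameraOpChanged(false, true, false, true, false)));   // apply-override-mute
        return 0;
    }
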
@@ -3242,6 +3329,7 @@
if (mRegistered) {
return;
}
+ hasCameraPrivacyFeature(); // Called so the result is cached
mSpm.addSensorPrivacyListener(this);
mSensorPrivacyEnabled = mSpm.isSensorPrivacyEnabled();
status_t res = mSpm.linkToDeath(this);
@@ -3251,39 +3339,6 @@
}
}
-status_t CameraService::SensorPrivacyPolicy::registerSelfForIndividual(int userId) {
- Mutex::Autolock _l(mSensorPrivacyLock);
- if (mRegistered) {
- return OK;
- }
-
- status_t res = mSpm.addIndividualSensorPrivacyListener(userId,
- SensorPrivacyManager::INDIVIDUAL_SENSOR_CAMERA, this);
- if (res != OK) {
- ALOGE("Unable to register camera privacy listener: %s (%d)", strerror(-res), res);
- return res;
- }
-
- res = mSpm.isIndividualSensorPrivacyEnabled(userId,
- SensorPrivacyManager::INDIVIDUAL_SENSOR_CAMERA, mSensorPrivacyEnabled);
- if (res != OK) {
- ALOGE("Unable to check camera privacy: %s (%d)", strerror(-res), res);
- return res;
- }
-
- res = mSpm.linkToDeath(this);
- if (res != OK) {
- ALOGE("Register link to death failed for sensor privacy: %s (%d)", strerror(-res), res);
- return res;
- }
-
- mRegistered = true;
- mIsIndividual = true;
- mUserId = userId;
- ALOGV("SensorPrivacyPolicy: Registered with SensorPrivacyManager");
- return OK;
-}
-
void CameraService::SensorPrivacyPolicy::unregisterSelf() {
Mutex::Autolock _l(mSensorPrivacyLock);
mSpm.removeSensorPrivacyListener(this);
@@ -3297,20 +3352,24 @@
return mSensorPrivacyEnabled;
}
+bool CameraService::SensorPrivacyPolicy::isCameraPrivacyEnabled(userid_t userId) {
+ if (!hasCameraPrivacyFeature()) {
+ return false;
+ }
+ return mSpm.isIndividualSensorPrivacyEnabled(userId,
+ SensorPrivacyManager::INDIVIDUAL_SENSOR_CAMERA);
+}
+
binder::Status CameraService::SensorPrivacyPolicy::onSensorPrivacyChanged(bool enabled) {
{
Mutex::Autolock _l(mSensorPrivacyLock);
mSensorPrivacyEnabled = enabled;
}
// if sensor privacy is enabled then block all clients from accessing the camera
- sp<CameraService> service = mService.promote();
- if (service != nullptr) {
- if (mIsIndividual) {
- service->setMuteForAllClients(mUserId, enabled);
- } else {
- if (enabled) {
- service->blockAllClients();
- }
+ if (enabled) {
+ sp<CameraService> service = mService.promote();
+ if (service != nullptr) {
+ service->blockAllClients();
}
}
return binder::Status::ok();
@@ -3322,6 +3381,31 @@
mRegistered = false;
}
+bool CameraService::SensorPrivacyPolicy::hasCameraPrivacyFeature() {
+ if (!mNeedToCheckCameraPrivacyFeature) {
+ return mHasCameraPrivacyFeature;
+ }
+ bool hasCameraPrivacyFeature = false;
+ sp<IBinder> binder = defaultServiceManager()->getService(String16("package_native"));
+ if (binder != nullptr) {
+ sp<content::pm::IPackageManagerNative> packageManager =
+ interface_cast<content::pm::IPackageManagerNative>(binder);
+ if (packageManager != nullptr) {
+ binder::Status status = packageManager->hasSystemFeature(
+ String16("android.hardware.camera.toggle"), 0, &hasCameraPrivacyFeature);
+
+ if (status.isOk()) {
+ mNeedToCheckCameraPrivacyFeature = false;
+ mHasCameraPrivacyFeature = hasCameraPrivacyFeature;
+ } else {
+ ALOGE("Unable to check if camera privacy feature is supported");
+ }
+ }
+ }
+
+ return hasCameraPrivacyFeature;
+}
+
// ----------------------------------------------------------------------------
// CameraState
// ----------------------------------------------------------------------------
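
hasCameraPrivacyFeature() above queries package_native for the "android.hardware.camera.toggle" feature once and caches the answer only when the binder call succeeds, so a transient failure is retried on the next query rather than being remembered as "not supported". A small standalone sketch of that retry-until-success caching shape; the query is a stand-in lambda, not the package manager call:

    #include <cstdio>
    #include <functional>
    #include <optional>
    #include <utility>

    class CachedFeatureFlag {
      public:
        explicit CachedFeatureFlag(std::function<std::optional<bool>()> query)
            : mQuery(std::move(query)) {}

        bool get() {
            if (!mNeedToCheck) return mCachedValue;  // fast path after the first success
            if (auto result = mQuery()) {            // nullopt means the query failed
                mCachedValue = *result;
                mNeedToCheck = false;                // cache only successful answers
            }
            return mCachedValue;                     // failures report false for now
        }

      private:
        std::function<std::optional<bool>()> mQuery;
        bool mCachedValue = false;
        bool mNeedToCheck = true;
    };

    int main() {
        int calls = 0;
        CachedFeatureFlag hasToggle([&calls]() -> std::optional<bool> {
            ++calls;
            if (calls == 1) return std::nullopt;  // simulate a failed binder call
            return true;
        });
        bool first = hasToggle.get();   // query fails -> false, nothing cached
        bool second = hasToggle.get();  // query succeeds -> true, cached
        bool third = hasToggle.get();   // served from the cache, no new query
        printf("%d %d %d calls=%d\n", first, second, third, calls);  // 0 1 1 calls=2
        return 0;
    }
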
@@ -3757,7 +3841,7 @@
TorchModeStatus::AVAILABLE_OFF :
TorchModeStatus::NOT_AVAILABLE;
if (torchStatus != newTorchStatus) {
- onTorchStatusChangedLocked(cameraId, newTorchStatus);
+ onTorchStatusChangedLocked(cameraId, newTorchStatus, deviceKind);
}
}
}
@@ -3941,19 +4025,6 @@
}
}
-void CameraService::setMuteForAllClients(userid_t userId, bool enabled) {
- const auto clients = mActiveClientManager.getAll();
- for (auto& current : clients) {
- if (current != nullptr) {
- const auto basicClient = current->getValue();
- if (basicClient.get() != nullptr
- && multiuser_get_user_id(basicClient->getClientUid()) == userId) {
- basicClient->setCameraMute(enabled);
- }
- }
- }
-}
-
// NOTE: This is a remote API - make sure all args are validated
status_t CameraService::shellCommand(int in, int out, int err, const Vector<String16>& args) {
if (!checkCallingPermission(sManageCameraPermission, nullptr, nullptr)) {
@@ -4160,16 +4231,4 @@
return mode;
}
-bool CameraService::isUserSensorPrivacyEnabledForUid(uid_t uid) {
- userid_t userId = multiuser_get_user_id(uid);
- if (mCameraSensorPrivacyPolicies.find(userId) == mCameraSensorPrivacyPolicies.end()) {
- sp<SensorPrivacyPolicy> userPolicy = new SensorPrivacyPolicy(this);
- if (userPolicy->registerSelfForIndividual(userId) != OK) {
- return false;
- }
- mCameraSensorPrivacyPolicies[userId] = userPolicy;
- }
- return mCameraSensorPrivacyPolicies[userId]->isSensorPrivacyEnabled();
-}
-
}; // namespace android
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index c60fab8..771981c 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -167,6 +167,8 @@
virtual binder::Status notifyDeviceStateChange(int64_t newState);
+ virtual binder::Status notifyDisplayConfigurationChange();
+
// OK = supports api of that version, -EOPNOTSUPP = does not support
virtual binder::Status supportsCameraApi(
const String16& cameraId, int32_t apiVersion,
@@ -261,6 +263,12 @@
// Return the package name for this client
virtual String16 getPackageName() const;
+ // Return the camera facing for this client
+ virtual int getCameraFacing() const;
+
+ // Return the camera orientation for this client
+ virtual int getCameraOrientation() const;
+
// Notify client about a fatal error
virtual void notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras) = 0;
@@ -307,6 +315,7 @@
const std::optional<String16>& clientFeatureId,
const String8& cameraIdStr,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid);
@@ -323,6 +332,7 @@
static sp<CameraService> sCameraService;
const String8 mCameraIdStr;
const int mCameraFacing;
+ const int mOrientation;
String16 mClientPackageName;
std::optional<String16> mClientFeatureId;
pid_t mClientPid;
@@ -400,6 +410,7 @@
const String8& cameraIdStr,
int api1CameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid);
@@ -654,13 +665,13 @@
public:
explicit SensorPrivacyPolicy(wp<CameraService> service)
: mService(service), mSensorPrivacyEnabled(false), mRegistered(false),
- mIsIndividual(false), mUserId(0) {}
+ mHasCameraPrivacyFeature(false), mNeedToCheckCameraPrivacyFeature(true) {}
void registerSelf();
- status_t registerSelfForIndividual(int userId);
void unregisterSelf();
bool isSensorPrivacyEnabled();
+ bool isCameraPrivacyEnabled(userid_t userId);
binder::Status onSensorPrivacyChanged(bool enabled);
@@ -673,8 +684,10 @@
Mutex mSensorPrivacyLock;
bool mSensorPrivacyEnabled;
bool mRegistered;
- bool mIsIndividual;
- userid_t mUserId;
+ bool mHasCameraPrivacyFeature;
+ bool mNeedToCheckCameraPrivacyFeature;
+
+ bool hasCameraPrivacyFeature();
};
sp<UidPolicy> mUidPolicy;
@@ -1009,7 +1022,8 @@
// handle torch mode status change and invoke callbacks. mTorchStatusMutex
// should be locked.
void onTorchStatusChangedLocked(const String8& cameraId,
- hardware::camera::common::V1_0::TorchModeStatus newStatus);
+ hardware::camera::common::V1_0::TorchModeStatus newStatus,
+ SystemCameraKind systemCameraKind);
// get a camera's torch status. mTorchStatusMutex should be locked.
status_t getTorchStatusLocked(const String8 &cameraId,
@@ -1052,9 +1066,6 @@
// Blocks all active clients.
void blockAllClients();
- // Mutes all active clients for a user.
- void setMuteForAllClients(userid_t userId, bool enabled);
-
// Overrides the UID state as if it is idle
status_t handleSetUidState(const Vector<String16>& args, int err);
@@ -1090,7 +1101,7 @@
static binder::Status makeClient(const sp<CameraService>& cameraService,
const sp<IInterface>& cameraCb, const String16& packageName,
const std::optional<String16>& featureId, const String8& cameraId, int api1CameraId,
- int facing, int clientPid, uid_t clientUid, int servicePid,
+ int facing, int sensorOrientation, int clientPid, uid_t clientUid, int servicePid,
int deviceVersion, apiLevel effectiveApiLevel,
/*out*/sp<BasicClient>* client);
@@ -1103,7 +1114,8 @@
void broadcastTorchModeStatus(const String8& cameraId,
- hardware::camera::common::V1_0::TorchModeStatus status);
+ hardware::camera::common::V1_0::TorchModeStatus status,
+ SystemCameraKind systemCameraKind);
void disconnectClient(const String8& id, sp<BasicClient> clientToDisconnect);
@@ -1118,7 +1130,7 @@
// Aggreated audio restriction mode for all camera clients
int32_t mAudioRestriction;
- // Current override rotate-and-crop mode
+ // Current override cmd rotate-and-crop mode; AUTO means no override
uint8_t mOverrideRotateAndCropMode = ANDROID_SCALER_ROTATE_AND_CROP_AUTO;
// Current image dump mask
@@ -1126,12 +1138,6 @@
// Current camera mute mode
bool mOverrideCameraMuteMode = false;
-
- // Map from user to sensor privacy policy
- std::map<userid_t, sp<SensorPrivacyPolicy>> mCameraSensorPrivacyPolicies;
-
- // Checks if the sensor privacy is enabled for the uid
- bool isUserSensorPrivacyEnabledForUid(uid_t uid);
};
} // namespace android
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 31cfed6..72b3c40 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -55,11 +55,12 @@
const String8& cameraDeviceId,
int api1CameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid):
Camera2ClientBase(cameraService, cameraClient, clientPackageName, clientFeatureId,
- cameraDeviceId, api1CameraId, cameraFacing,
+ cameraDeviceId, api1CameraId, cameraFacing, sensorOrientation,
clientPid, clientUid, servicePid),
mParameters(api1CameraId, cameraFacing)
{
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index 4d667e3..d16b242 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -101,6 +101,7 @@
const String8& cameraDeviceId,
int api1CameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid);
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 1b65d1a..343f4a7 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -61,6 +61,7 @@
const String8& cameraId,
int api1CameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid) :
@@ -70,6 +71,7 @@
clientFeatureId,
cameraId,
cameraFacing,
+ sensorOrientation,
clientPid,
clientUid,
servicePid),
@@ -86,12 +88,13 @@
const std::optional<String16>& clientFeatureId,
const String8& cameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid) :
Camera2ClientBase(cameraService, remoteCallback, clientPackageName, clientFeatureId,
cameraId, /*API1 camera ID*/ -1,
- cameraFacing, clientPid, clientUid, servicePid),
+ cameraFacing, sensorOrientation, clientPid, clientUid, servicePid),
mInputStream(),
mStreamingRequestId(REQUEST_ID_NONE),
mRequestIdCounter(0) {
@@ -1711,7 +1714,8 @@
if (offlineSession.get() != nullptr) {
offlineClient = new CameraOfflineSessionClient(sCameraService,
offlineSession, offlineCompositeStreamMap, cameraCb, mClientPackageName,
- mClientFeatureId, mCameraIdStr, mCameraFacing, mClientPid, mClientUid, mServicePid);
+ mClientFeatureId, mCameraIdStr, mCameraFacing, mOrientation, mClientPid, mClientUid,
+ mServicePid);
ret = sCameraService->addOfflineClient(mCameraIdStr, offlineClient);
}
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index adedf92..44ffeef 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -55,6 +55,7 @@
const String8& cameraId,
int api1CameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid);
@@ -181,6 +182,7 @@
const std::optional<String16>& clientFeatureId,
const String8& cameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid);
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
index 5c5fcda..ba49325 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
@@ -49,13 +49,13 @@
const sp<ICameraDeviceCallbacks>& remoteCallback,
const String16& clientPackageName,
const std::optional<String16>& clientFeatureId,
- const String8& cameraIdStr, int cameraFacing,
+ const String8& cameraIdStr, int cameraFacing, int sensorOrientation,
int clientPid, uid_t clientUid, int servicePid) :
CameraService::BasicClient(
cameraService,
IInterface::asBinder(remoteCallback),
clientPackageName, clientFeatureId,
- cameraIdStr, cameraFacing, clientPid, clientUid, servicePid),
+ cameraIdStr, cameraFacing, sensorOrientation, clientPid, clientUid, servicePid),
mRemoteCallback(remoteCallback), mOfflineSession(session),
mCompositeStreamMap(offlineCompositeStreamMap) {}
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 6fd8d45..1f79354 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -51,11 +51,13 @@
const String8& cameraId,
int api1CameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid):
TClientBase(cameraService, remoteCallback, clientPackageName, clientFeatureId,
- cameraId, api1CameraId, cameraFacing, clientPid, clientUid, servicePid),
+ cameraId, api1CameraId, cameraFacing, sensorOrientation, clientPid, clientUid,
+ servicePid),
mSharedCameraCallbacks(remoteCallback),
mDeviceVersion(cameraService->getDeviceVersion(TClientBase::mCameraIdStr)),
mDevice(new Camera3Device(cameraId)),
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index 1ce4393..dab0050 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -52,6 +52,7 @@
const String8& cameraId,
int api1CameraId,
int cameraFacing,
+ int sensorOrientation,
int clientPid,
uid_t clientUid,
int servicePid);
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 62fc18f..6dffc5d 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -474,12 +474,12 @@
hardware::Return<void> CameraProviderManager::onRegistration(
const hardware::hidl_string& /*fqName*/,
const hardware::hidl_string& name,
- bool /*preexisting*/) {
+ bool preexisting) {
std::lock_guard<std::mutex> providerLock(mProviderLifecycleLock);
{
std::lock_guard<std::mutex> lock(mInterfaceMutex);
- addProviderLocked(name);
+ addProviderLocked(name, preexisting);
}
sp<StatusListener> listener = getStatusListener();
@@ -1230,31 +1230,53 @@
return falseRet;
}
-status_t CameraProviderManager::addProviderLocked(const std::string& newProvider) {
- for (const auto& providerInfo : mProviders) {
- if (providerInfo->mProviderName == newProvider) {
- ALOGW("%s: Camera provider HAL with name '%s' already registered", __FUNCTION__,
- newProvider.c_str());
- return ALREADY_EXISTS;
- }
- }
-
+status_t CameraProviderManager::tryToInitializeProviderLocked(
+ const std::string& providerName, const sp<ProviderInfo>& providerInfo) {
sp<provider::V2_4::ICameraProvider> interface;
- interface = mServiceProxy->tryGetService(newProvider);
+ interface = mServiceProxy->tryGetService(providerName);
if (interface == nullptr) {
- ALOGE("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
- newProvider.c_str());
+ // The interface may not be started yet. In that case, this is not a
+ // fatal error.
+ ALOGW("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
+ providerName.c_str());
return BAD_VALUE;
}
- sp<ProviderInfo> providerInfo = new ProviderInfo(newProvider, this);
- status_t res = providerInfo->initialize(interface, mDeviceState);
- if (res != OK) {
- return res;
+ return providerInfo->initialize(interface, mDeviceState);
+}
+
+status_t CameraProviderManager::addProviderLocked(const std::string& newProvider,
+ bool preexisting) {
+ // Several camera provider instances can be temporarily present.
+ // Defer initialization of a new instance until the older instance is properly removed.
+ auto providerInstance = newProvider + "-" + std::to_string(mProviderInstanceId);
+ bool providerPresent = false;
+ for (const auto& providerInfo : mProviders) {
+ if (providerInfo->mProviderName == newProvider) {
+ ALOGW("%s: Camera provider HAL with name '%s' already registered",
+ __FUNCTION__, newProvider.c_str());
+ if (preexisting) {
+ return ALREADY_EXISTS;
+ } else {
+ ALOGW("%s: The new provider instance will get initialized immediately after the"
+ " currently present instance is removed!", __FUNCTION__);
+ providerPresent = true;
+ break;
+ }
+ }
+ }
+
+ sp<ProviderInfo> providerInfo = new ProviderInfo(newProvider, providerInstance, this);
+ if (!providerPresent) {
+ status_t res = tryToInitializeProviderLocked(newProvider, providerInfo);
+ if (res != OK) {
+ return res;
+ }
}
mProviders.push_back(providerInfo);
+ mProviderInstanceId++;
return OK;
}
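
addProviderLocked() now tolerates a re-registration racing with the previous instance's death: every ProviderInfo gets a unique "<name>-<instanceId>" label, and when an instance with the same provider name is still listed the new one is recorded but its initialization is deferred until removeProvider() drops the old entry and initializes the newest one. A standalone sketch of that bookkeeping; Provider here is a toy struct, not the HIDL ProviderInfo:

    #include <cstdio>
    #include <memory>
    #include <string>
    #include <vector>

    struct Provider {
        std::string name;       // e.g. "legacy/0"
        std::string instance;   // e.g. "legacy/0-1", unique per registration
        bool initialized = false;
    };

    struct ProviderManager {
        std::vector<std::shared_ptr<Provider>> providers;
        size_t nextInstanceId = 0;

        void addProvider(const std::string& name) {
            bool olderInstancePresent = false;
            for (const auto& p : providers) {
                if (p->name == name) { olderInstancePresent = true; break; }
            }
            auto p = std::make_shared<Provider>();
            p->name = name;
            p->instance = name + "-" + std::to_string(nextInstanceId++);
            // Defer initialization while an older instance with the same name exists.
            p->initialized = !olderInstancePresent;
            providers.push_back(p);
        }

        void removeProvider(const std::string& instance) {
            std::string removedName;
            for (auto it = providers.begin(); it != providers.end(); ++it) {
                if ((*it)->instance == instance) {
                    removedName = (*it)->name;
                    providers.erase(it);
                    break;
                }
            }
            // Initialize any deferred instance of the same provider name.
            for (const auto& p : providers) {
                if (p->name == removedName && !p->initialized) {
                    p->initialized = true;
                    break;
                }
            }
        }
    };

    int main() {
        ProviderManager mgr;
        mgr.addProvider("legacy/0");       // instance "legacy/0-0", initialized
        mgr.addProvider("legacy/0");       // instance "legacy/0-1", deferred
        mgr.removeProvider("legacy/0-0");  // old instance dies -> "legacy/0-1" initialized
        for (const auto& p : mgr.providers) {
            printf("%s initialized=%d\n", p->instance.c_str(), p->initialized);
        }
        return 0;
    }
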
@@ -1264,12 +1286,14 @@
std::unique_lock<std::mutex> lock(mInterfaceMutex);
std::vector<String8> removedDeviceIds;
status_t res = NAME_NOT_FOUND;
+ std::string removedProviderName;
for (auto it = mProviders.begin(); it != mProviders.end(); it++) {
- if ((*it)->mProviderName == provider) {
+ if ((*it)->mProviderInstance == provider) {
removedDeviceIds.reserve((*it)->mDevices.size());
for (auto& deviceInfo : (*it)->mDevices) {
removedDeviceIds.push_back(String8(deviceInfo->mId.c_str()));
}
+ removedProviderName = (*it)->mProviderName;
mProviders.erase(it);
res = OK;
break;
@@ -1279,6 +1303,14 @@
ALOGW("%s: Camera provider HAL with name '%s' is not registered", __FUNCTION__,
provider.c_str());
} else {
+ // Check if there are any newer instances of the same provider and try to
+ // initialize.
+ for (const auto& providerInfo : mProviders) {
+ if (providerInfo->mProviderName == removedProviderName) {
+ return tryToInitializeProviderLocked(removedProviderName, providerInfo);
+ }
+ }
+
// Inform camera service of loss of presence for all the devices from this provider,
// without lock held for reentrancy
sp<StatusListener> listener = getStatusListener();
@@ -1287,7 +1319,9 @@
for (auto& id : removedDeviceIds) {
listener->onDeviceStatusChanged(id, CameraDeviceStatus::NOT_PRESENT);
}
+ lock.lock();
}
+
}
return res;
}
@@ -1301,8 +1335,10 @@
CameraProviderManager::ProviderInfo::ProviderInfo(
const std::string &providerName,
+ const std::string &providerInstance,
CameraProviderManager *manager) :
mProviderName(providerName),
+ mProviderInstance(providerInstance),
mProviderTagid(generateVendorTagId(providerName)),
mUniqueDeviceCount(0),
mManager(manager) {
@@ -1626,7 +1662,7 @@
status_t CameraProviderManager::ProviderInfo::dump(int fd, const Vector<String16>&) const {
dprintf(fd, "== Camera Provider HAL %s (v2.%d, %s) static info: %zu devices: ==\n",
- mProviderName.c_str(),
+ mProviderInstance.c_str(),
mMinorVersion,
mIsRemote ? "remote" : "passthrough",
mDevices.size());
@@ -1942,12 +1978,12 @@
void CameraProviderManager::ProviderInfo::serviceDied(uint64_t cookie,
const wp<hidl::base::V1_0::IBase>& who) {
(void) who;
- ALOGI("Camera provider '%s' has died; removing it", mProviderName.c_str());
+ ALOGI("Camera provider '%s' has died; removing it", mProviderInstance.c_str());
if (cookie != mId) {
ALOGW("%s: Unexpected serviceDied cookie %" PRIu64 ", expected %" PRIu32,
__FUNCTION__, cookie, mId);
}
- mManager->removeProvider(mProviderName);
+ mManager->removeProvider(mProviderInstance);
}
status_t CameraProviderManager::ProviderInfo::setUpVendorTags() {
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index 12bda9b..5531dd7 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -365,6 +365,7 @@
virtual public hardware::hidl_death_recipient
{
const std::string mProviderName;
+ const std::string mProviderInstance;
const metadata_vendor_id_t mProviderTagid;
int mMinorVersion;
sp<VendorTagDescriptor> mVendorTagDescriptor;
@@ -379,7 +380,7 @@
sp<hardware::camera::provider::V2_4::ICameraProvider> mSavedInterface;
- ProviderInfo(const std::string &providerName,
+ ProviderInfo(const std::string &providerName, const std::string &providerInstance,
CameraProviderManager *manager);
~ProviderInfo();
@@ -657,7 +658,10 @@
hardware::hidl_version minVersion = hardware::hidl_version{0,0},
hardware::hidl_version maxVersion = hardware::hidl_version{1000,0}) const;
- status_t addProviderLocked(const std::string& newProvider);
+ status_t addProviderLocked(const std::string& newProvider, bool preexisting = false);
+
+ status_t tryToInitializeProviderLocked(const std::string& providerName,
+ const sp<ProviderInfo>& providerInfo);
bool isLogicalCameraLocked(const std::string& id, std::vector<std::string>* physicalCameraIds);
@@ -666,6 +670,7 @@
bool isValidDeviceLocked(const std::string &id, uint16_t majorVersion) const;
+ size_t mProviderInstanceId = 0;
std::vector<sp<ProviderInfo>> mProviders;
void addProviderToMap(
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index 855b5ab..a74fd9d 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -23,7 +23,9 @@
#include <android/hardware/camera/device/3.2/ICameraDeviceCallback.h>
#include <android/hardware/camera/device/3.2/ICameraDeviceSession.h>
#include <camera_metadata_hidden.h>
+#include <hidl/HidlBinderSupport.h>
#include <gtest/gtest.h>
+#include <utility>
using namespace android;
using namespace android::hardware::camera;
@@ -173,6 +175,25 @@
return hardware::Void();
}
+ virtual ::android::hardware::Return<bool> linkToDeath(
+ const ::android::sp<::android::hardware::hidl_death_recipient>& recipient,
+ uint64_t cookie) {
+ if (mInitialDeathRecipient.get() == nullptr) {
+ mInitialDeathRecipient =
+ std::make_unique<::android::hardware::hidl_binder_death_recipient>(recipient,
+ cookie, this);
+ }
+ return true;
+ }
+
+ void signalInitialBinderDeathRecipient() {
+ if (mInitialDeathRecipient.get() != nullptr) {
+ mInitialDeathRecipient->binderDied(nullptr /*who*/);
+ }
+ }
+
+ std::unique_ptr<::android::hardware::hidl_binder_death_recipient> mInitialDeathRecipient;
+
enum MethodNames {
SET_CALLBACK,
GET_VENDOR_TAGS,
@@ -567,3 +588,47 @@
ASSERT_EQ(serviceProxy.mLastRequestedServiceNames.back(), testProviderInstanceName) <<
"Incorrect instance requested from service manager";
}
+
+// Test that CameraProviderManager can handle races between provider death notifications and
+// provider registration callbacks
+TEST(CameraProviderManagerTest, BinderDeathRegistrationRaceTest) {
+
+ std::vector<hardware::hidl_string> deviceNames;
+ deviceNames.push_back("device@3.2/test/0");
+ deviceNames.push_back("device@3.2/test/1");
+ hardware::hidl_vec<common::V1_0::VendorTagSection> vendorSection;
+ status_t res;
+
+ sp<CameraProviderManager> providerManager = new CameraProviderManager();
+ sp<TestStatusListener> statusListener = new TestStatusListener();
+ TestInteractionProxy serviceProxy;
+ sp<TestICameraProvider> provider = new TestICameraProvider(deviceNames,
+ vendorSection);
+
+ // Not setting up provider in the service proxy yet, to test cases where a
+ // HAL isn't starting right
+ res = providerManager->initialize(statusListener, &serviceProxy);
+ ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
+
+ // Now set up provider and trigger a registration
+ serviceProxy.setProvider(provider);
+
+ hardware::hidl_string testProviderFqInterfaceName =
+ "android.hardware.camera.provider@2.4::ICameraProvider";
+ hardware::hidl_string testProviderInstanceName = "test/0";
+ serviceProxy.mManagerNotificationInterface->onRegistration(
+ testProviderFqInterfaceName,
+ testProviderInstanceName, false);
+
+ // Simulate artificial delay of the registration callback which arrives before the
+ // death notification
+ serviceProxy.mManagerNotificationInterface->onRegistration(
+ testProviderFqInterfaceName,
+ testProviderInstanceName, false);
+
+ provider->signalInitialBinderDeathRecipient();
+
+ auto deviceCount = static_cast<unsigned> (providerManager->getCameraCount().second);
+ ASSERT_EQ(deviceCount, deviceNames.size()) <<
+ "Unexpected amount of camera devices";
+}
diff --git a/services/mediametrics/AudioAnalytics.cpp b/services/mediametrics/AudioAnalytics.cpp
index 4ef87e4..dbc68b2 100644
--- a/services/mediametrics/AudioAnalytics.cpp
+++ b/services/mediametrics/AudioAnalytics.cpp
@@ -140,7 +140,6 @@
static constexpr const char * const AAudioStreamFields[] {
"mediametrics_aaudiostream_reported",
- "caller_name",
"path",
"direction",
"frames_per_burst",
@@ -156,6 +155,8 @@
"format_app",
"format_device",
"log_session_id",
+ "sample_rate",
+ "content_type",
};
/**
@@ -206,8 +207,10 @@
return { result, ss.str() };
}
-AudioAnalytics::AudioAnalytics()
+AudioAnalytics::AudioAnalytics(const std::shared_ptr<StatsdLog>& statsdLog)
: mDeliverStatistics(property_get_bool(PROP_AUDIO_ANALYTICS_CLOUD_ENABLED, true))
+ , mStatsdLog(statsdLog)
+ , mAudioPowerUsage(this, statsdLog)
{
SetMinimumLogSeverity(android::base::DEBUG); // for LOG().
ALOGD("%s", __func__);
@@ -242,7 +245,7 @@
});
}));
- // Handle legacy aaudio stream statistics
+ // Handle legacy aaudio playback stream statistics
mActions.addAction(
AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK "*." AMEDIAMETRICS_PROP_EVENT,
std::string(AMEDIAMETRICS_PROP_EVENT_VALUE_ENDAAUDIOSTREAM),
@@ -251,6 +254,15 @@
mAAudioStreamInfo.endAAudioStream(item, AAudioStreamInfo::CALLER_PATH_LEGACY);
}));
+ // Handle legacy aaudio capture stream statistics
+ mActions.addAction(
+ AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD "*." AMEDIAMETRICS_PROP_EVENT,
+ std::string(AMEDIAMETRICS_PROP_EVENT_VALUE_ENDAAUDIOSTREAM),
+ std::make_shared<AnalyticsActions::Function>(
+ [this](const std::shared_ptr<const android::mediametrics::Item> &item) {
+ mAAudioStreamInfo.endAAudioStream(item, AAudioStreamInfo::CALLER_PATH_LEGACY);
+ }));
+
// Handle mmap aaudio stream statistics
mActions.addAction(
AMEDIAMETRICS_KEY_PREFIX_AUDIO_STREAM "*." AMEDIAMETRICS_PROP_EVENT,
@@ -406,20 +418,6 @@
ll -= l;
}
- if (ll > 0) {
- // Print the statsd atoms we sent out.
- const std::string statsd = mStatsdLog.dumpToString(" " /* prefix */, ll - 1);
- const size_t n = std::count(statsd.begin(), statsd.end(), '\n') + 1; // we control this.
- if ((size_t)ll >= n) {
- if (n == 1) {
- ss << "Statsd atoms: empty or truncated\n";
- } else {
- ss << "Statsd atoms:\n" << statsd;
- }
- ll -= (int32_t)n;
- }
- }
-
if (ll > 0 && prefix == nullptr) {
auto [s, l] = mAudioPowerUsage.dump(ll);
ss << s;
@@ -601,7 +599,8 @@
, logSessionIdForStats.c_str()
);
ALOGV("%s: statsd %s", __func__, str.c_str());
- mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
+ mAudioAnalytics.mStatsdLog->log(
+ android::util::MEDIAMETRICS_AUDIORECORDDEVICEUSAGE_REPORTED, str);
}
} break;
case THREAD: {
@@ -649,7 +648,8 @@
, ENUM_EXTRACT(typeForStats)
);
ALOGV("%s: statsd %s", __func__, str.c_str());
- mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
+ mAudioAnalytics.mStatsdLog->log(
+ android::util::MEDIAMETRICS_AUDIOTHREADDEVICEUSAGE_REPORTED, str);
}
} break;
case TRACK: {
@@ -769,7 +769,8 @@
, logSessionIdForStats.c_str()
);
ALOGV("%s: statsd %s", __func__, str.c_str());
- mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
+ mAudioAnalytics.mStatsdLog->log(
+ android::util::MEDIAMETRICS_AUDIOTRACKDEVICEUSAGE_REPORTED, str);
}
} break;
}
@@ -845,7 +846,8 @@
, /* connection_count */ 1
);
ALOGV("%s: statsd %s", __func__, str.c_str());
- mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
+ mAudioAnalytics.mStatsdLog->log(
+ android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED, str);
}
}
}
@@ -898,7 +900,8 @@
, /* connection_count */ 1
);
ALOGV("%s: statsd %s", __func__, str.c_str());
- mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
+ mAudioAnalytics.mStatsdLog->log(
+ android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED, str);
}
return;
}
@@ -924,7 +927,8 @@
, /* connection_count */ 1
);
ALOGV("%s: statsd %s", __func__, str.c_str());
- mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
+ mAudioAnalytics.mStatsdLog->log(
+ android::util::MEDIAMETRICS_AUDIODEVICECONNECTION_REPORTED, str);
}
}
@@ -932,12 +936,6 @@
const std::shared_ptr<const android::mediametrics::Item> &item, CallerPath path) const {
const std::string& key = item->getKey();
- std::string callerNameStr;
- mAudioAnalytics.mAnalyticsState->timeMachine().get(
- key, AMEDIAMETRICS_PROP_CALLERNAME, &callerNameStr);
-
- const auto callerName = types::lookup<types::CALLER_NAME, int32_t>(callerNameStr);
-
std::string directionStr;
mAudioAnalytics.mAnalyticsState->timeMachine().get(
key, AMEDIAMETRICS_PROP_DIRECTION, &directionStr);
@@ -960,7 +958,8 @@
key, AMEDIAMETRICS_PROP_CHANNELCOUNT, &channelCount);
int64_t totalFramesTransferred = -1;
- // TODO: log and get total frames transferred
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_FRAMESTRANSFERRED, &totalFramesTransferred);
std::string perfModeRequestedStr;
mAudioAnalytics.mAnalyticsState->timeMachine().get(
@@ -968,8 +967,11 @@
const auto perfModeRequested =
types::lookup<types::AAUDIO_PERFORMANCE_MODE, int32_t>(perfModeRequestedStr);
- int32_t perfModeActual = 0;
- // TODO: log and get actual performance mode
+ std::string perfModeActualStr;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_PERFORMANCEMODEACTUAL, &perfModeActualStr);
+ const auto perfModeActual =
+ types::lookup<types::AAUDIO_PERFORMANCE_MODE, int32_t>(perfModeActualStr);
std::string sharingModeStr;
mAudioAnalytics.mAnalyticsState->timeMachine().get(
@@ -983,8 +985,10 @@
std::string serializedDeviceTypes;
// TODO: only routed device id is logged, but no device type
- int32_t formatApp = 0;
- // TODO: log format from app
+ std::string formatAppStr;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_ENCODINGREQUESTED, &formatAppStr);
+ const auto formatApp = types::lookup<types::ENCODING, int32_t>(formatAppStr);
std::string formatDeviceStr;
mAudioAnalytics.mAnalyticsState->timeMachine().get(
@@ -992,10 +996,19 @@
const auto formatDevice = types::lookup<types::ENCODING, int32_t>(formatDeviceStr);
std::string logSessionId;
- // TODO: log logSessionId
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_LOGSESSIONID, &logSessionId);
+
+ int32_t sampleRate = 0;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_SAMPLERATE, &sampleRate);
+
+ std::string contentTypeStr;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_CONTENTTYPE, &contentTypeStr);
+ const auto contentType = types::lookup<types::CONTENT_TYPE, int32_t>(contentTypeStr);
LOG(LOG_LEVEL) << "key:" << key
- << " caller_name:" << callerName << "(" << callerNameStr << ")"
<< " path:" << path
<< " direction:" << direction << "(" << directionStr << ")"
<< " frames_per_burst:" << framesPerBurst
@@ -1004,20 +1017,21 @@
<< " channel_count:" << channelCount
<< " total_frames_transferred:" << totalFramesTransferred
<< " perf_mode_requested:" << perfModeRequested << "(" << perfModeRequestedStr << ")"
- << " perf_mode_actual:" << perfModeActual
+ << " perf_mode_actual:" << perfModeActual << "(" << perfModeActualStr << ")"
<< " sharing:" << sharingMode << "(" << sharingModeStr << ")"
<< " xrun_count:" << xrunCount
<< " device_type:" << serializedDeviceTypes
- << " format_app:" << formatApp
+ << " format_app:" << formatApp << "(" << formatAppStr << ")"
<< " format_device: " << formatDevice << "(" << formatDeviceStr << ")"
- << " log_session_id: " << logSessionId;
+ << " log_session_id: " << logSessionId
+ << " sample_rate: " << sampleRate
+ << " content_type: " << contentType << "(" << contentTypeStr << ")";
if (mAudioAnalytics.mDeliverStatistics) {
android::util::BytesField bf_serialized(
serializedDeviceTypes.c_str(), serializedDeviceTypes.size());
const auto result = sendToStatsd(
CONDITION(android::util::MEDIAMETRICS_AAUDIOSTREAM_REPORTED)
- , callerName
, path
, direction
, framesPerBurst
@@ -1033,12 +1047,13 @@
, formatApp
, formatDevice
, logSessionId.c_str()
+ , sampleRate
+ , contentType
);
std::stringstream ss;
ss << "result:" << result;
const auto fieldsStr = printFields(AAudioStreamFields,
CONDITION(android::util::MEDIAMETRICS_AAUDIOSTREAM_REPORTED)
- , callerName
, path
, direction
, framesPerBurst
@@ -1054,11 +1069,13 @@
, formatApp
, formatDevice
, logSessionId.c_str()
+ , sampleRate
+ , contentType
);
ss << " " << fieldsStr;
std::string str = ss.str();
ALOGV("%s: statsd %s", __func__, str.c_str());
- mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
+ mAudioAnalytics.mStatsdLog->log(android::util::MEDIAMETRICS_AAUDIOSTREAM_REPORTED, str);
}
}
diff --git a/services/mediametrics/AudioAnalytics.h b/services/mediametrics/AudioAnalytics.h
index 07872ef..2b41a95 100644
--- a/services/mediametrics/AudioAnalytics.h
+++ b/services/mediametrics/AudioAnalytics.h
@@ -17,10 +17,10 @@
#pragma once
#include <android-base/thread_annotations.h>
-#include <audio_utils/SimpleLog.h>
#include "AnalyticsActions.h"
#include "AnalyticsState.h"
#include "AudioPowerUsage.h"
+#include "StatsdLog.h"
#include "TimedAction.h"
#include "Wrap.h"
@@ -32,7 +32,7 @@
friend AudioPowerUsage;
public:
- AudioAnalytics();
+ explicit AudioAnalytics(const std::shared_ptr<StatsdLog>& statsdLog);
~AudioAnalytics();
/**
@@ -122,8 +122,7 @@
SharedPtrWrap<AnalyticsState> mPreviousAnalyticsState;
TimedAction mTimedAction; // locked internally
-
- SimpleLog mStatsdLog{16 /* log lines */}; // locked internally
+ const std::shared_ptr<StatsdLog> mStatsdLog; // locked internally, ok for multiple threads.
// DeviceUse is a nested class which handles audio device usage accounting.
// We define this class at the end to ensure prior variables all properly constructed.
@@ -212,7 +211,7 @@
AudioAnalytics &mAudioAnalytics;
} mAAudioStreamInfo{*this};
- AudioPowerUsage mAudioPowerUsage{this};
+ AudioPowerUsage mAudioPowerUsage;
};
} // namespace android::mediametrics
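
Illustration (not part of this patch): with this change AudioAnalytics takes the shared StatsdLog at construction and forwards it to its AudioPowerUsage member. A minimal construction sketch, mirroring the fuzzer change further down in this patch:

    // Construct the internally locked statsd log and hand it to AudioAnalytics.
    auto statsdLog = std::make_shared<android::mediametrics::StatsdLog>(10 /* log lines */);
    android::mediametrics::AudioAnalytics audioAnalytics{statsdLog};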
diff --git a/services/mediametrics/AudioPowerUsage.cpp b/services/mediametrics/AudioPowerUsage.cpp
index e584f12..ab74c8e 100644
--- a/services/mediametrics/AudioPowerUsage.cpp
+++ b/services/mediametrics/AudioPowerUsage.cpp
@@ -127,14 +127,13 @@
return deviceMask;
}
-/* static */
-void AudioPowerUsage::sendItem(const std::shared_ptr<const mediametrics::Item>& item)
+void AudioPowerUsage::sendItem(const std::shared_ptr<const mediametrics::Item>& item) const
{
int32_t type;
if (!item->getInt32(AUDIO_POWER_USAGE_PROP_TYPE, &type)) return;
- int32_t device;
- if (!item->getInt32(AUDIO_POWER_USAGE_PROP_DEVICE, &device)) return;
+ int32_t audio_device;
+ if (!item->getInt32(AUDIO_POWER_USAGE_PROP_DEVICE, &audio_device)) return;
int64_t duration_ns;
if (!item->getInt64(AUDIO_POWER_USAGE_PROP_DURATION_NS, &duration_ns)) return;
@@ -142,11 +141,24 @@
double volume;
if (!item->getDouble(AUDIO_POWER_USAGE_PROP_VOLUME, &volume)) return;
- (void)android::util::stats_write(android::util::AUDIO_POWER_USAGE_DATA_REPORTED,
- device,
- (int32_t)(duration_ns / NANOS_PER_SECOND),
- (float)volume,
+ const int32_t duration_secs = (int32_t)(duration_ns / NANOS_PER_SECOND);
+ const float average_volume = (float)volume;
+ const int result = android::util::stats_write(android::util::AUDIO_POWER_USAGE_DATA_REPORTED,
+ audio_device,
+ duration_secs,
+ average_volume,
type);
+
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_audio_power_usage_data_reported:"
+ << android::util::AUDIO_POWER_USAGE_DATA_REPORTED
+ << " audio_device:" << audio_device
+ << " duration_secs:" << duration_secs
+ << " average_volume:" << average_volume
+ << " type:" << type
+ << " }";
+ mStatsdLog->log(android::util::AUDIO_POWER_USAGE_DATA_REPORTED, log.str());
}
bool AudioPowerUsage::saveAsItem_l(
@@ -360,8 +372,10 @@
mPrimaryDevice = device;
}
-AudioPowerUsage::AudioPowerUsage(AudioAnalytics *audioAnalytics)
+AudioPowerUsage::AudioPowerUsage(
+ AudioAnalytics *audioAnalytics, const std::shared_ptr<StatsdLog>& statsdLog)
: mAudioAnalytics(audioAnalytics)
+ , mStatsdLog(statsdLog)
, mDisabled(property_get_bool(PROP_AUDIO_METRICS_DISABLED, AUDIO_METRICS_DISABLED_DEFAULT))
, mIntervalHours(property_get_int32(PROP_AUDIO_METRICS_INTERVAL_HR, INTERVAL_HR_DEFAULT))
{
diff --git a/services/mediametrics/AudioPowerUsage.h b/services/mediametrics/AudioPowerUsage.h
index b705a6a..7021902 100644
--- a/services/mediametrics/AudioPowerUsage.h
+++ b/services/mediametrics/AudioPowerUsage.h
@@ -22,13 +22,15 @@
#include <mutex>
#include <thread>
+#include "StatsdLog.h"
+
namespace android::mediametrics {
class AudioAnalytics;
class AudioPowerUsage {
public:
- explicit AudioPowerUsage(AudioAnalytics *audioAnalytics);
+ AudioPowerUsage(AudioAnalytics *audioAnalytics, const std::shared_ptr<StatsdLog>& statsdLog);
~AudioPowerUsage();
void checkTrackRecord(const std::shared_ptr<const mediametrics::Item>& item, bool isTrack);
@@ -83,12 +85,13 @@
private:
bool saveAsItem_l(int32_t device, int64_t duration, int32_t type, double average_vol)
REQUIRES(mLock);
- static void sendItem(const std::shared_ptr<const mediametrics::Item>& item);
+ void sendItem(const std::shared_ptr<const mediametrics::Item>& item) const;
void collect();
bool saveAsItems_l(int32_t device, int64_t duration, int32_t type, double average_vol)
REQUIRES(mLock);
AudioAnalytics * const mAudioAnalytics;
+ const std::shared_ptr<StatsdLog> mStatsdLog; // mStatsdLog is internally locked
const bool mDisabled;
const int32_t mIntervalHours;
diff --git a/services/mediametrics/AudioTypes.cpp b/services/mediametrics/AudioTypes.cpp
index 44e96ec..838cdd5 100644
--- a/services/mediametrics/AudioTypes.cpp
+++ b/services/mediametrics/AudioTypes.cpp
@@ -77,6 +77,7 @@
{"AUDIO_DEVICE_IN_DEFAULT", 1LL << 28},
// R values above.
{"AUDIO_DEVICE_IN_BLE_HEADSET", 1LL << 29},
+ {"AUDIO_DEVICE_IN_HDMI_EARC", 1LL << 30},
};
return map;
}
@@ -123,7 +124,8 @@
{"AUDIO_DEVICE_OUT_DEFAULT", 1LL << 30},
// R values above.
{"AUDIO_DEVICE_OUT_BLE_HEADSET", 1LL << 31},
- {"AUDIO_DEVICE_OUT_BLE_SPAEKER", 1LL << 32},
+ {"AUDIO_DEVICE_OUT_BLE_SPEAKER", 1LL << 32},
+ {"AUDIO_DEVICE_OUT_HDMI_EARC", 1LL << 33},
};
return map;
}
@@ -158,9 +160,9 @@
// DO NOT MODIFY VALUES(OK to add new ones).
// This may be found in frameworks/av/media/libaaudio/include/aaudio/AAudio.h
static std::unordered_map<std::string, int32_t> map {
- // UNKNOWN is -1
- {"AAUDIO_DIRECTION_OUTPUT", 0},
- {"AAUDIO_DIRECTION_INPUT", 1},
+ // UNKNOWN is 0
+ {"AAUDIO_DIRECTION_OUTPUT", 1 /* AAUDIO_DIRECTION_OUTPUT + 1 */},
+ {"AAUDIO_DIRECTION_INPUT", 2 /* AAUDIO_DIRECTION_INPUT + 1*/},
};
return map;
}
@@ -169,7 +171,7 @@
// DO NOT MODIFY VALUES(OK to add new ones).
// This may be found in frameworks/av/media/libaaudio/include/aaudio/AAudio.h
static std::unordered_map<std::string, int32_t> map {
- // UNKNOWN is -1
+ // UNKNOWN is 0
{"AAUDIO_PERFORMANCE_MODE_NONE", 10},
{"AAUDIO_PERFORMANCE_MODE_POWER_SAVING", 11},
{"AAUDIO_PERFORMANCE_MODE_LOW_LATENCY", 12},
@@ -181,9 +183,9 @@
// DO NOT MODIFY VALUES(OK to add new ones).
// This may be found in frameworks/av/media/libaaudio/include/aaudio/AAudio.h
static std::unordered_map<std::string, int32_t> map {
- // UNKNOWN is -1
- {"AAUDIO_SHARING_MODE_EXCLUSIVE", 0},
- {"AAUDIO_SHARING_MODE_SHARED", 1},
+ // UNKNOWN is 0
+ {"AAUDIO_SHARING_MODE_EXCLUSIVE", 1 /* AAUDIO_SHARING_MODE_EXCLUSIVE + 1 */},
+ {"AAUDIO_SHARING_MODE_SHARED", 2 /* AAUDIO_SHARING_MODE_SHARED + 1 */},
};
return map;
}
@@ -484,7 +486,7 @@
auto& map = getAAudioDirection();
auto it = map.find(direction);
if (it == map.end()) {
- return -1; // return unknown
+ return 0; // return unknown
}
return it->second;
}
@@ -506,7 +508,7 @@
auto& map = getAAudioPerformanceMode();
auto it = map.find(performanceMode);
if (it == map.end()) {
- return -1; // return unknown
+ return 0; // return unknown
}
return it->second;
}
@@ -528,7 +530,7 @@
auto& map = getAAudioSharingMode();
auto it = map.find(sharingMode);
if (it == map.end()) {
- return -1; // return unknown
+ return 0; // return unknown
}
return it->second;
}
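
Illustration (not part of this patch): the AAudio lookup tables above now reserve 0 for "unknown" and store the real AAudio enum value plus one, so a failed lookup and an unset field both report 0 to statsd. A minimal sketch of that convention, using a hypothetical helper name:

    #include <string>
    #include <unordered_map>

    // Hypothetical sketch of the "UNKNOWN is 0" convention used by the tables above.
    int32_t lookupAAudioDirection(const std::string& direction) {
        static const std::unordered_map<std::string, int32_t> map {
            {"AAUDIO_DIRECTION_OUTPUT", 1},  // AAUDIO_DIRECTION_OUTPUT + 1
            {"AAUDIO_DIRECTION_INPUT",  2},  // AAUDIO_DIRECTION_INPUT + 1
        };
        const auto it = map.find(direction);
        return it == map.end() ? 0 /* unknown */ : it->second;
    }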
diff --git a/services/mediametrics/MediaMetricsService.cpp b/services/mediametrics/MediaMetricsService.cpp
index bfc722e..1d64878 100644
--- a/services/mediametrics/MediaMetricsService.cpp
+++ b/services/mediametrics/MediaMetricsService.cpp
@@ -23,6 +23,7 @@
#include <pwd.h> //getpwuid
+#include <android-base/stringprintf.h>
#include <android/content/pm/IPackageManagerNative.h> // package info
#include <audio_utils/clock.h> // clock conversions
#include <binder/IPCThreadState.h> // get calling uid
@@ -37,6 +38,7 @@
namespace android {
+using base::StringPrintf;
using mediametrics::Item;
using mediametrics::startsWith;
@@ -204,21 +206,19 @@
(void)mAudioAnalytics.submit(sitem, isTrusted);
- (void)dump2Statsd(sitem); // failure should be logged in function.
+ (void)dump2Statsd(sitem, mStatsdLog); // failure should be logged in function.
saveItem(sitem);
return NO_ERROR;
}
status_t MediaMetricsService::dump(int fd, const Vector<String16>& args)
{
- String8 result;
-
if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
- result.appendFormat("Permission Denial: "
+ const std::string result = StringPrintf("Permission Denial: "
"can't dump MediaMetricsService from pid=%d, uid=%d\n",
IPCThreadState::self()->getCallingPid(),
IPCThreadState::self()->getCallingUid());
- write(fd, result.string(), result.size());
+ write(fd, result.c_str(), result.size());
return NO_ERROR;
}
@@ -250,17 +250,18 @@
// dumpsys media.metrics audiotrack,codec
// or dumpsys media.metrics audiotrack codec
- result.append("Recognized parameters:\n");
- result.append("--all show all records\n");
- result.append("--clear clear out saved records\n");
- result.append("--heap show heap usage (top 100)\n");
- result.append("--help display help\n");
- result.append("--prefix X process records for component X\n");
- result.append("--since X X < 0: records from -X seconds in the past\n");
- result.append(" X = 0: ignore\n");
- result.append(" X > 0: records from X seconds since Unix epoch\n");
- result.append("--unreachable show unreachable memory (leaks)\n");
- write(fd, result.string(), result.size());
+ static constexpr char result[] =
+ "Recognized parameters:\n"
+ "--all show all records\n"
+ "--clear clear out saved records\n"
+ "--heap show heap usage (top 100)\n"
+ "--help display help\n"
+ "--prefix X process records for component X\n"
+ "--since X X < 0: records from -X seconds in the past\n"
+ " X = 0: ignore\n"
+ " X > 0: records from X seconds since Unix epoch\n"
+ "--unreachable show unreachable memory (leaks)\n";
+ write(fd, result, std::size(result));
return NO_ERROR;
} else if (args[i] == prefixOption) {
++i;
@@ -286,7 +287,7 @@
unreachable = true;
}
}
-
+ std::stringstream result;
{
std::lock_guard _l(mLock);
@@ -295,21 +296,27 @@
mItems.clear();
mAudioAnalytics.clear();
} else {
- result.appendFormat("Dump of the %s process:\n", kServiceName);
+ result << StringPrintf("Dump of the %s process:\n", kServiceName);
const char *prefixptr = prefix.size() > 0 ? prefix.c_str() : nullptr;
- dumpHeaders(result, sinceNs, prefixptr);
- dumpQueue(result, sinceNs, prefixptr);
+ result << dumpHeaders(sinceNs, prefixptr);
+ result << dumpQueue(sinceNs, prefixptr);
// TODO: maybe consider a better way of dumping audio analytics info.
const int32_t linesToDump = all ? INT32_MAX : 1000;
auto [ dumpString, lines ] = mAudioAnalytics.dump(linesToDump, sinceNs, prefixptr);
- result.append(dumpString.c_str());
+ result << dumpString;
if (lines == linesToDump) {
- result.append("-- some lines may be truncated --\n");
+ result << "-- some lines may be truncated --\n";
}
+
+ // Dump the statsd atoms we sent out.
+ result << "Statsd atoms:\n"
+ << mStatsdLog->dumpToString(" " /* prefix */,
+ all ? STATSD_LOG_LINES_MAX : STATSD_LOG_LINES_DUMP);
}
}
- write(fd, result.string(), result.size());
+ const std::string str = result.str();
+ write(fd, str.c_str(), str.size());
// Check heap and unreachable memory outside of lock.
if (heap) {
@@ -327,38 +334,37 @@
}
// dump headers
-void MediaMetricsService::dumpHeaders(String8 &result, int64_t sinceNs, const char* prefix)
+std::string MediaMetricsService::dumpHeaders(int64_t sinceNs, const char* prefix)
{
+ std::stringstream result;
if (mediametrics::Item::isEnabled()) {
- result.append("Metrics gathering: enabled\n");
+ result << "Metrics gathering: enabled\n";
} else {
- result.append("Metrics gathering: DISABLED via property\n");
+ result << "Metrics gathering: DISABLED via property\n";
}
- result.appendFormat(
+ result << StringPrintf(
"Since Boot: Submissions: %lld Accepted: %lld\n",
(long long)mItemsSubmitted.load(), (long long)mItemsFinalized);
- result.appendFormat(
+ result << StringPrintf(
"Records Discarded: %lld (by Count: %lld by Expiration: %lld)\n",
(long long)mItemsDiscarded, (long long)mItemsDiscardedCount,
(long long)mItemsDiscardedExpire);
if (prefix != nullptr) {
- result.appendFormat("Restricting to prefix %s", prefix);
+ result << "Restricting to prefix " << prefix << "\n";
}
if (sinceNs != 0) {
- result.appendFormat(
- "Emitting Queue entries more recent than: %lld\n",
- (long long)sinceNs);
+ result << "Emitting Queue entries more recent than: " << sinceNs << "\n";
}
+ return result.str();
}
// TODO: should prefix be a set<string>?
-void MediaMetricsService::dumpQueue(String8 &result, int64_t sinceNs, const char* prefix)
+std::string MediaMetricsService::dumpQueue(int64_t sinceNs, const char* prefix)
{
if (mItems.empty()) {
- result.append("empty\n");
- return;
+ return "empty\n";
}
-
+ std::stringstream result;
int slot = 0;
for (const auto &item : mItems) { // TODO: consider std::lower_bound() on mItems
if (item->getTimestamp() < sinceNs) { // sinceNs == 0 means all items shown
@@ -369,9 +375,10 @@
__func__, item->getKey().c_str(), prefix);
continue;
}
- result.appendFormat("%5d: %s\n", slot, item->toString().c_str());
+ result << StringPrintf("%5d: %s\n", slot, item->toString().c_str());
slot++;
}
+ return result.str();
}
//
@@ -538,12 +545,13 @@
return AStatsManager_PULL_SKIP;
}
std::lock_guard _l(mLock);
+ bool dumped = false;
for (auto &item : mPullableItems[key]) {
if (const auto sitem = item.lock()) {
- dump2Statsd(sitem, data);
+ dumped |= dump2Statsd(sitem, data, mStatsdLog);
}
}
mPullableItems[key].clear();
- return AStatsManager_PULL_SUCCESS;
+ return dumped ? AStatsManager_PULL_SUCCESS : AStatsManager_PULL_SKIP;
}
} // namespace android
diff --git a/services/mediametrics/MediaMetricsService.h b/services/mediametrics/MediaMetricsService.h
index 8bc8019..8d0b1cf 100644
--- a/services/mediametrics/MediaMetricsService.h
+++ b/services/mediametrics/MediaMetricsService.h
@@ -100,8 +100,8 @@
bool expirations(const std::shared_ptr<const mediametrics::Item>& item) REQUIRES(mLock);
// support for generating output
- void dumpQueue(String8 &result, int64_t sinceNs, const char* prefix) REQUIRES(mLock);
- void dumpHeaders(String8 &result, int64_t sinceNs, const char* prefix) REQUIRES(mLock);
+ std::string dumpQueue(int64_t sinceNs, const char* prefix) REQUIRES(mLock);
+ std::string dumpHeaders(int64_t sinceNs, const char* prefix) REQUIRES(mLock);
// support statsd pushed atoms
static bool isPullable(const std::string &key);
@@ -124,7 +124,14 @@
std::atomic<int64_t> mItemsSubmitted{}; // accessed outside of lock.
- mediametrics::AudioAnalytics mAudioAnalytics; // mAudioAnalytics is locked internally.
+ // mStatsdLog is locked internally (thread-safe) and shows the last atoms logged
+ static constexpr size_t STATSD_LOG_LINES_MAX = 30; // recent log lines to keep
+ static constexpr size_t STATSD_LOG_LINES_DUMP = 4; // normal amount of lines to dump
+ const std::shared_ptr<mediametrics::StatsdLog> mStatsdLog{
+ std::make_shared<mediametrics::StatsdLog>(STATSD_LOG_LINES_MAX)};
+
+ // mAudioAnalytics is locked internally.
+ mediametrics::AudioAnalytics mAudioAnalytics{mStatsdLog};
std::mutex mLock;
// statistics about our analytics
diff --git a/services/mediametrics/StatsdLog.h b/services/mediametrics/StatsdLog.h
new file mode 100644
index 0000000..e207bac
--- /dev/null
+++ b/services/mediametrics/StatsdLog.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <audio_utils/SimpleLog.h>
+#include <map>
+#include <mutex>
+#include <sstream>
+
+namespace android::mediametrics {
+
+class StatsdLog {
+public:
+ explicit StatsdLog(size_t lines) : mSimpleLog(lines) {}
+
+ void log(int atom, const std::string& string) {
+ {
+ std::lock_guard lock(mLock);
+ ++mCountMap[atom];
+ }
+ mSimpleLog.log("%s", string.c_str());
+ }
+
+ std::string dumpToString(const char *prefix = "", size_t logLines = 0) const {
+ std::stringstream ss;
+
+ { // first print out the atom counts
+ std::lock_guard lock(mLock);
+
+ size_t col = 0;
+ for (const auto& count : mCountMap) {
+ if (col == 8) {
+ col = 0;
+ ss << "\n" << prefix;
+ } else {
+ ss << " ";
+ }
+ ss << "[ " << count.first << " : " << count.second << " ]";
+ ++col;
+ }
+ ss << "\n";
+ }
+
+ // then print out the log lines
+ ss << mSimpleLog.dumpToString(prefix, logLines);
+ return ss.str();
+ }
+
+private:
+ SimpleLog mSimpleLog; // internally locked
+ std::map<int /* atom */, size_t /* count */> mCountMap GUARDED_BY(mLock); // sorted
+ mutable std::mutex mLock;
+};
+
+} // namespace android::mediametrics
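
Illustration (not part of this patch): a minimal usage sketch of the new StatsdLog, assuming the caller already has the atom id and a formatted message:

    // Keep the 30 most recent atom log lines, counting per atom id.
    auto statsdLog = std::make_shared<android::mediametrics::StatsdLog>(30 /* lines */);
    statsdLog->log(android::util::MEDIAMETRICS_AAUDIOSTREAM_REPORTED, "result:0 { ... }");
    // dumpToString() prints the per-atom counts first, then the recent log lines.
    const std::string dump = statsdLog->dumpToString("  " /* prefix */, 4 /* logLines */);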
diff --git a/services/mediametrics/StringUtils.h b/services/mediametrics/StringUtils.h
index 37ed173..01034d9 100644
--- a/services/mediametrics/StringUtils.h
+++ b/services/mediametrics/StringUtils.h
@@ -16,6 +16,8 @@
#pragma once
+#include <iomanip>
+#include <sstream>
#include <string>
#include <vector>
@@ -146,4 +148,23 @@
return {}; // if not a logSessionId, return an empty string.
}
+inline std::string bytesToString(const std::vector<uint8_t>& bytes, size_t maxSize = SIZE_MAX) {
+ if (bytes.size() == 0) {
+ return "{}";
+ }
+ std::stringstream ss;
+ ss << "{";
+ ss << std::hex << std::setfill('0');
+ maxSize = std::min(maxSize, bytes.size());
+ for (size_t i = 0; i < maxSize; ++i) {
+ ss << " " << std::setw(2) << (int)bytes[i];
+ }
+ if (maxSize != bytes.size()) {
+ ss << " ... }";
+ } else {
+ ss << " }";
+ }
+ return ss.str();
+}
+
} // namespace android::mediametrics::stringutils
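
Illustration (not part of this patch): bytesToString() above renders a byte vector as space-separated hex inside braces and appends an ellipsis when maxSize truncates the output; for example:

    // Expected output of android::mediametrics::stringutils::bytesToString.
    std::vector<uint8_t> bytes{0x01, 0xab, 0xff, 0x10};
    bytesToString(bytes);      // "{ 01 ab ff 10 }"
    bytesToString(bytes, 2);   // "{ 01 ab ... }"
    bytesToString({});         // "{}"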
diff --git a/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp b/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
index 0cb2594..8b0b479 100644
--- a/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
+++ b/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
@@ -320,7 +320,9 @@
void MediaMetricsServiceFuzzer::invokeAudioAnalytics(const uint8_t *data, size_t size) {
FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
- android::mediametrics::AudioAnalytics audioAnalytics;
+ std::shared_ptr<android::mediametrics::StatsdLog> statsdLog =
+ std::make_shared<android::mediametrics::StatsdLog>(10);
+ android::mediametrics::AudioAnalytics audioAnalytics{statsdLog};
while (fdp.remaining_bytes()) {
auto item = std::make_shared<mediametrics::Item>(fdp.ConsumeRandomLengthString().c_str());
diff --git a/services/mediametrics/iface_statsd.cpp b/services/mediametrics/iface_statsd.cpp
index b7c5296..776f878 100644
--- a/services/mediametrics/iface_statsd.cpp
+++ b/services/mediametrics/iface_statsd.cpp
@@ -48,10 +48,7 @@
// has its own routine to handle this.
//
-bool enabled_statsd = true;
-
-using statsd_pusher = bool (*)(const mediametrics::Item *);
-using statsd_puller = bool (*)(const mediametrics::Item *, AStatsEventList *);
+static bool enabled_statsd = true;
namespace {
template<typename Handler, typename... Args>
@@ -68,15 +65,17 @@
}
if (handlers.count(key)) {
- return (handlers.at(key))(item.get(), args...);
+ return (handlers.at(key))(item, args...);
}
return false;
}
} // namespace
// give me a record, I'll look at the type and upload appropriately
-bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item) {
- static const std::map<std::string, statsd_pusher> statsd_pushers =
+bool dump2Statsd(
+ const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog) {
+ static const std::map<std::string, statsd_pusher*> statsd_pushers =
{
{ "audiopolicy", statsd_audiopolicy },
{ "audiorecord", statsd_audiorecord },
@@ -91,15 +90,16 @@
{ "nuplayer2", statsd_nuplayer },
{ "recorder", statsd_recorder },
};
- return dump2StatsdInternal(statsd_pushers, item);
+ return dump2StatsdInternal(statsd_pushers, item, statsdLog);
}
-bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item, AStatsEventList* out) {
- static const std::map<std::string, statsd_puller> statsd_pullers =
+bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item, AStatsEventList* out,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog) {
+ static const std::map<std::string, statsd_puller*> statsd_pullers =
{
{ "mediadrm", statsd_mediadrm_puller },
};
- return dump2StatsdInternal(statsd_pullers, item, out);
+ return dump2StatsdInternal(statsd_pullers, item, out, statsdLog);
}
} // namespace android
diff --git a/services/mediametrics/iface_statsd.h b/services/mediametrics/iface_statsd.h
index 1b6c79a..c2a8b3c 100644
--- a/services/mediametrics/iface_statsd.h
+++ b/services/mediametrics/iface_statsd.h
@@ -22,26 +22,29 @@
class Item;
}
-extern bool enabled_statsd;
-
+using statsd_pusher = bool (const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog);
// component specific dumpers
-extern bool statsd_audiopolicy(const mediametrics::Item *);
-extern bool statsd_audiorecord(const mediametrics::Item *);
-extern bool statsd_audiothread(const mediametrics::Item *);
-extern bool statsd_audiotrack(const mediametrics::Item *);
-extern bool statsd_codec(const mediametrics::Item *);
-extern bool statsd_extractor(const mediametrics::Item *);
-extern bool statsd_mediaparser(const mediametrics::Item *);
-extern bool statsd_nuplayer(const mediametrics::Item *);
-extern bool statsd_recorder(const mediametrics::Item *);
+extern statsd_pusher statsd_audiopolicy;
+extern statsd_pusher statsd_audiorecord;
+extern statsd_pusher statsd_audiothread;
+extern statsd_pusher statsd_audiotrack;
+extern statsd_pusher statsd_codec;
+extern statsd_pusher statsd_extractor;
+extern statsd_pusher statsd_mediaparser;
-extern bool statsd_mediadrm(const mediametrics::Item *);
-extern bool statsd_drmmanager(const mediametrics::Item *);
+extern statsd_pusher statsd_nuplayer;
+extern statsd_pusher statsd_recorder;
+extern statsd_pusher statsd_mediadrm;
+extern statsd_pusher statsd_drmmanager;
+using statsd_puller = bool (const std::shared_ptr<const mediametrics::Item>& item,
+ AStatsEventList *, const std::shared_ptr<mediametrics::StatsdLog>& statsdLog);
// component specific pullers
-extern bool statsd_mediadrm_puller(const mediametrics::Item *, AStatsEventList *);
+extern statsd_puller statsd_mediadrm_puller;
-bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item);
-bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item, AStatsEventList* out);
-
+bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog);
+bool dump2Statsd(const std::shared_ptr<const mediametrics::Item>& item, AStatsEventList* out,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog);
} // namespace android
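
Illustration (not part of this patch): the header change above declares every component dumper through a function-type alias, so all pushers are forced to share one signature. A minimal sketch of that idiom, using a hypothetical statsd_example handler:

    using statsd_pusher = bool (const std::shared_ptr<const mediametrics::Item>& item,
            const std::shared_ptr<mediametrics::StatsdLog>& statsdLog);

    extern statsd_pusher statsd_example;  // declares a function with exactly that signature

    // The definition elsewhere must spell the signature out in full.
    bool statsd_example(const std::shared_ptr<const mediametrics::Item>& item,
            const std::shared_ptr<mediametrics::StatsdLog>& statsdLog) {
        if (item == nullptr) return false;
        statsdLog->log(0 /* hypothetical atom id */, item->getKey());
        return true;
    }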
diff --git a/services/mediametrics/statsd_audiopolicy.cpp b/services/mediametrics/statsd_audiopolicy.cpp
index 6ef2f2c..f44b7c4 100644
--- a/services/mediametrics/statsd_audiopolicy.cpp
+++ b/services/mediametrics/statsd_audiopolicy.cpp
@@ -37,16 +37,16 @@
namespace android {
-bool statsd_audiopolicy(const mediametrics::Item *item)
+bool statsd_audiopolicy(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) return false;
// these go into the statsd wrapper
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
-
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
// the rest into our own proto
//
@@ -60,35 +60,35 @@
metrics_proto.set_status(status);
}
//string char kAudioPolicyRqstSrc[] = "android.media.audiopolicy.rqst.src";
- std::string rqst_src;
- if (item->getString("android.media.audiopolicy.rqst.src", &rqst_src)) {
- metrics_proto.set_request_source(std::move(rqst_src));
+ std::string request_source;
+ if (item->getString("android.media.audiopolicy.rqst.src", &request_source)) {
+ metrics_proto.set_request_source(request_source);
}
//string char kAudioPolicyRqstPkg[] = "android.media.audiopolicy.rqst.pkg";
- std::string rqst_pkg;
- if (item->getString("android.media.audiopolicy.rqst.pkg", &rqst_pkg)) {
- metrics_proto.set_request_package(std::move(rqst_pkg));
+ std::string request_package;
+ if (item->getString("android.media.audiopolicy.rqst.pkg", &request_package)) {
+ metrics_proto.set_request_package(request_package);
}
//int32 char kAudioPolicyRqstSession[] = "android.media.audiopolicy.rqst.session";
- int32_t rqst_session = -1;
- if (item->getInt32("android.media.audiopolicy.rqst.session", &rqst_session)) {
- metrics_proto.set_request_session(rqst_session);
+ int32_t request_session = -1;
+ if (item->getInt32("android.media.audiopolicy.rqst.session", &request_session)) {
+ metrics_proto.set_request_session(request_session);
}
//string char kAudioPolicyRqstDevice[] = "android.media.audiopolicy.rqst.device";
- std::string rqst_device;
- if (item->getString("android.media.audiopolicy.rqst.device", &rqst_device)) {
- metrics_proto.set_request_device(std::move(rqst_device));
+ std::string request_device;
+ if (item->getString("android.media.audiopolicy.rqst.device", &request_device)) {
+ metrics_proto.set_request_device(request_device);
}
//string char kAudioPolicyActiveSrc[] = "android.media.audiopolicy.active.src";
- std::string active_src;
- if (item->getString("android.media.audiopolicy.active.src", &active_src)) {
- metrics_proto.set_active_source(std::move(active_src));
+ std::string active_source;
+ if (item->getString("android.media.audiopolicy.active.src", &active_source)) {
+ metrics_proto.set_active_source(active_source);
}
//string char kAudioPolicyActivePkg[] = "android.media.audiopolicy.active.pkg";
- std::string active_pkg;
- if (item->getString("android.media.audiopolicy.active.pkg", &active_pkg)) {
- metrics_proto.set_active_package(std::move(active_pkg));
+ std::string active_package;
+ if (item->getString("android.media.audiopolicy.active.pkg", &active_package)) {
+ metrics_proto.set_active_package(active_package);
}
//int32 char kAudioPolicyActiveSession[] = "android.media.audiopolicy.active.session";
int32_t active_session = -1;
@@ -98,27 +98,40 @@
//string char kAudioPolicyActiveDevice[] = "android.media.audiopolicy.active.device";
std::string active_device;
if (item->getString("android.media.audiopolicy.active.device", &active_device)) {
- metrics_proto.set_active_device(std::move(active_device));
+ metrics_proto.set_active_device(active_device);
}
-
std::string serialized;
if (!metrics_proto.SerializeToString(&serialized)) {
ALOGE("Failed to serialize audipolicy metrics");
return false;
}
- if (enabled_statsd) {
- android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
- (void)android::util::stats_write(android::util::MEDIAMETRICS_AUDIOPOLICY_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- bf_serialized);
+ android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIOPOLICY_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ bf_serialized);
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_audiopolicy_reported:"
+ << android::util::MEDIAMETRICS_AUDIOPOLICY_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
- } else {
- ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
- }
-
+ << " status:" << status
+ << " request_source:" << request_source
+ << " request_package:" << request_package
+ << " request_session:" << request_session
+ << " request_device:" << request_device
+ << " active_source:" << active_source
+ << " active_package:" << active_package
+ << " active_session:" << active_session
+ << " active_device:" << active_device
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_AUDIOPOLICY_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_audiorecord.cpp b/services/mediametrics/statsd_audiorecord.cpp
index db809dc..70a67ae 100644
--- a/services/mediametrics/statsd_audiorecord.cpp
+++ b/services/mediametrics/statsd_audiorecord.cpp
@@ -38,16 +38,15 @@
namespace android {
-bool statsd_audiorecord(const mediametrics::Item *item)
-{
+bool statsd_audiorecord(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog) {
if (item == nullptr) return false;
// these go into the statsd wrapper
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
-
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
// the rest into our own proto
//
@@ -57,12 +56,12 @@
//
std::string encoding;
if (item->getString("android.media.audiorecord.encoding", &encoding)) {
- metrics_proto.set_encoding(std::move(encoding));
+ metrics_proto.set_encoding(encoding);
}
std::string source;
if (item->getString("android.media.audiorecord.source", &source)) {
- metrics_proto.set_source(std::move(source));
+ metrics_proto.set_source(source);
}
int32_t latency = -1;
@@ -80,14 +79,14 @@
metrics_proto.set_channels(channels);
}
- int64_t createdMs = -1;
- if (item->getInt64("android.media.audiorecord.createdMs", &createdMs)) {
- metrics_proto.set_created_millis(createdMs);
+ int64_t created_millis = -1;
+ if (item->getInt64("android.media.audiorecord.createdMs", &created_millis)) {
+ metrics_proto.set_created_millis(created_millis);
}
- int64_t durationMs = -1;
- if (item->getInt64("android.media.audiorecord.durationMs", &durationMs)) {
- metrics_proto.set_duration_millis(durationMs);
+ int64_t duration_millis = -1;
+ if (item->getInt64("android.media.audiorecord.durationMs", &duration_millis)) {
+ metrics_proto.set_duration_millis(duration_millis);
}
int32_t count = -1;
@@ -95,44 +94,43 @@
metrics_proto.set_count(count);
}
- int32_t errcode = -1;
- if (item->getInt32("android.media.audiorecord.errcode", &errcode)) {
- metrics_proto.set_error_code(errcode);
- } else if (item->getInt32("android.media.audiorecord.lastError.code", &errcode)) {
- metrics_proto.set_error_code(errcode);
+ int32_t error_code = -1;
+ if (item->getInt32("android.media.audiorecord.errcode", &error_code)) {
+ metrics_proto.set_error_code(error_code);
+ } else if (item->getInt32("android.media.audiorecord.lastError.code", &error_code)) {
+ metrics_proto.set_error_code(error_code);
}
- std::string errfunc;
- if (item->getString("android.media.audiorecord.errfunc", &errfunc)) {
- metrics_proto.set_error_function(std::move(errfunc));
- } else if (item->getString("android.media.audiorecord.lastError.at", &errfunc)) {
- metrics_proto.set_error_function(std::move(errfunc));
+ std::string error_function;
+ if (item->getString("android.media.audiorecord.errfunc", &error_function)) {
+ metrics_proto.set_error_function(error_function);
+ } else if (item->getString("android.media.audiorecord.lastError.at", &error_function)) {
+ metrics_proto.set_error_function(error_function);
}
- // portId (int32)
int32_t port_id = -1;
if (item->getInt32("android.media.audiorecord.portId", &port_id)) {
metrics_proto.set_port_id(count);
}
- // frameCount (int32)
- int32_t frameCount = -1;
- if (item->getInt32("android.media.audiorecord.frameCount", &frameCount)) {
- metrics_proto.set_frame_count(frameCount);
+
+ int32_t frame_count = -1;
+ if (item->getInt32("android.media.audiorecord.frameCount", &frame_count)) {
+ metrics_proto.set_frame_count(frame_count);
}
- // attributes (string)
+
std::string attributes;
if (item->getString("android.media.audiorecord.attributes", &attributes)) {
- metrics_proto.set_attributes(std::move(attributes));
+ metrics_proto.set_attributes(attributes);
}
- // channelMask (int64)
- int64_t channelMask = -1;
- if (item->getInt64("android.media.audiorecord.channelMask", &channelMask)) {
- metrics_proto.set_channel_mask(channelMask);
+
+ int64_t channel_mask = -1;
+ if (item->getInt64("android.media.audiorecord.channelMask", &channel_mask)) {
+ metrics_proto.set_channel_mask(channel_mask);
}
- // startcount (int64)
- int64_t startcount = -1;
- if (item->getInt64("android.media.audiorecord.startcount", &startcount)) {
- metrics_proto.set_start_count(startcount);
+
+ int64_t start_count = -1;
+ if (item->getInt64("android.media.audiorecord.startcount", &start_count)) {
+ metrics_proto.set_start_count(start_count);
}
std::string serialized;
@@ -145,21 +143,44 @@
// log_session_id (string)
std::string logSessionId;
(void)item->getString("android.media.audiorecord.logSessionId", &logSessionId);
- const auto logSessionIdForStats =
+ const auto log_session_id =
mediametrics::stringutils::sanitizeLogSessionId(logSessionId);
- if (enabled_statsd) {
- android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
- (void)android::util::stats_write(android::util::MEDIAMETRICS_AUDIORECORD_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- bf_serialized,
- logSessionIdForStats.c_str());
+ android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIORECORD_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ bf_serialized,
+ log_session_id.c_str());
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_audiorecord_reported:"
+ << android::util::MEDIAMETRICS_AUDIORECORD_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
- } else {
- ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
- }
+ << " encoding:" << encoding
+ << " source:" << source
+ << " latency:" << latency
+ << " samplerate:" << samplerate
+ << " channels:" << channels
+ << " created_millis:" << created_millis
+ << " duration_millis:" << duration_millis
+ << " count:" << count
+ << " error_code:" << error_code
+ << " error_function:" << error_function
+ << " port_id:" << port_id
+ << " frame_count:" << frame_count
+ << " attributes:" << attributes
+ << " channel_mask:" << channel_mask
+ << " start_count:" << start_count
+
+ << " log_session_id:" << log_session_id
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_AUDIORECORD_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_audiothread.cpp b/services/mediametrics/statsd_audiothread.cpp
index 2ad2562..34cc923 100644
--- a/services/mediametrics/statsd_audiothread.cpp
+++ b/services/mediametrics/statsd_audiothread.cpp
@@ -37,16 +37,16 @@
namespace android {
-bool statsd_audiothread(const mediametrics::Item *item)
+bool statsd_audiothread(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) return false;
// these go into the statsd wrapper
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
-
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
// the rest into our own proto
//
@@ -68,17 +68,17 @@
if (item->getInt32(MM_PREFIX "samplerate", &samplerate)) {
metrics_proto.set_samplerate(samplerate);
}
- std::string workhist;
- if (item->getString(MM_PREFIX "workMs.hist", &workhist)) {
- metrics_proto.set_work_millis_hist(std::move(workhist));
+ std::string work_millis_hist;
+ if (item->getString(MM_PREFIX "workMs.hist", &work_millis_hist)) {
+ metrics_proto.set_work_millis_hist(work_millis_hist);
}
- std::string latencyhist;
- if (item->getString(MM_PREFIX "latencyMs.hist", &latencyhist)) {
- metrics_proto.set_latency_millis_hist(std::move(latencyhist));
+ std::string latency_millis_hist;
+ if (item->getString(MM_PREFIX "latencyMs.hist", &latency_millis_hist)) {
+ metrics_proto.set_latency_millis_hist(latency_millis_hist);
}
- std::string warmuphist;
- if (item->getString(MM_PREFIX "warmupMs.hist", &warmuphist)) {
- metrics_proto.set_warmup_millis_hist(std::move(warmuphist));
+ std::string warmup_millis_hist;
+ if (item->getString(MM_PREFIX "warmupMs.hist", &warmup_millis_hist)) {
+ metrics_proto.set_warmup_millis_hist(warmup_millis_hist);
}
int64_t underruns = -1;
if (item->getInt64(MM_PREFIX "underruns", &underruns)) {
@@ -88,101 +88,99 @@
if (item->getInt64(MM_PREFIX "overruns", &overruns)) {
metrics_proto.set_overruns(overruns);
}
- int64_t activeMs = -1;
- if (item->getInt64(MM_PREFIX "activeMs", &activeMs)) {
- metrics_proto.set_active_millis(activeMs);
+ int64_t active_millis = -1;
+ if (item->getInt64(MM_PREFIX "activeMs", &active_millis)) {
+ metrics_proto.set_active_millis(active_millis);
}
- int64_t durationMs = -1;
- if (item->getInt64(MM_PREFIX "durationMs", &durationMs)) {
- metrics_proto.set_duration_millis(durationMs);
+ int64_t duration_millis = -1;
+ if (item->getInt64(MM_PREFIX "durationMs", &duration_millis)) {
+ metrics_proto.set_duration_millis(duration_millis);
}
- // item->setInt32(MM_PREFIX "id", (int32_t)mId); // IO handle
int32_t id = -1;
if (item->getInt32(MM_PREFIX "id", &id)) {
metrics_proto.set_id(id);
}
- // item->setInt32(MM_PREFIX "portId", (int32_t)mPortId);
+
int32_t port_id = -1;
- if (item->getInt32(MM_PREFIX "portId", &id)) {
+ if (item->getInt32(MM_PREFIX "portId", &port_id)) {
metrics_proto.set_port_id(port_id);
}
// item->setCString(MM_PREFIX "type", threadTypeToString(mType));
std::string type;
if (item->getString(MM_PREFIX "type", &type)) {
- metrics_proto.set_type(std::move(type));
+ metrics_proto.set_type(type);
}
- // item->setInt32(MM_PREFIX "sampleRate", (int32_t)mSampleRate);
+
int32_t sample_rate = -1;
if (item->getInt32(MM_PREFIX "sampleRate", &sample_rate)) {
metrics_proto.set_sample_rate(sample_rate);
}
- // item->setInt64(MM_PREFIX "channelMask", (int64_t)mChannelMask);
+
int32_t channel_mask = -1;
if (item->getInt32(MM_PREFIX "channelMask", &channel_mask)) {
metrics_proto.set_channel_mask(channel_mask);
}
- // item->setCString(MM_PREFIX "encoding", toString(mFormat).c_str());
+
std::string encoding;
if (item->getString(MM_PREFIX "encoding", &encoding)) {
- metrics_proto.set_encoding(std::move(encoding));
+ metrics_proto.set_encoding(encoding);
}
- // item->setInt32(MM_PREFIX "frameCount", (int32_t)mFrameCount);
+
int32_t frame_count = -1;
if (item->getInt32(MM_PREFIX "frameCount", &frame_count)) {
metrics_proto.set_frame_count(frame_count);
}
- // item->setCString(MM_PREFIX "outDevice", toString(mOutDevice).c_str());
- std::string outDevice;
- if (item->getString(MM_PREFIX "outDevice", &outDevice)) {
- metrics_proto.set_output_device(std::move(outDevice));
- }
- // item->setCString(MM_PREFIX "inDevice", toString(mInDevice).c_str());
- std::string inDevice;
- if (item->getString(MM_PREFIX "inDevice", &inDevice)) {
- metrics_proto.set_input_device(std::move(inDevice));
- }
- // item->setDouble(MM_PREFIX "ioJitterMs.mean", mIoJitterMs.getMean());
- double iojitters_ms_mean = -1;
- if (item->getDouble(MM_PREFIX "ioJitterMs.mean", &iojitters_ms_mean)) {
- metrics_proto.set_io_jitter_mean_millis(iojitters_ms_mean);
- }
- // item->setDouble(MM_PREFIX "ioJitterMs.std", mIoJitterMs.getStdDev());
- double iojitters_ms_std = -1;
- if (item->getDouble(MM_PREFIX "ioJitterMs.std", &iojitters_ms_std)) {
- metrics_proto.set_io_jitter_stddev_millis(iojitters_ms_std);
- }
- // item->setDouble(MM_PREFIX "processTimeMs.mean", mProcessTimeMs.getMean());
- double process_time_ms_mean = -1;
- if (item->getDouble(MM_PREFIX "processTimeMs.mean", &process_time_ms_mean)) {
- metrics_proto.set_process_time_mean_millis(process_time_ms_mean);
- }
- // item->setDouble(MM_PREFIX "processTimeMs.std", mProcessTimeMs.getStdDev());
- double process_time_ms_std = -1;
- if (item->getDouble(MM_PREFIX "processTimeMs.std", &process_time_ms_std)) {
- metrics_proto.set_process_time_stddev_millis(process_time_ms_std);
- }
- // item->setDouble(MM_PREFIX "timestampJitterMs.mean", tsjitter.getMean());
- double timestamp_jitter_ms_mean = -1;
- if (item->getDouble(MM_PREFIX "timestampJitterMs.mean", ×tamp_jitter_ms_mean)) {
- metrics_proto.set_timestamp_jitter_mean_millis(timestamp_jitter_ms_mean);
- }
- // item->setDouble(MM_PREFIX "timestampJitterMs.std", tsjitter.getStdDev());
- double timestamp_jitter_ms_stddev = -1;
- if (item->getDouble(MM_PREFIX "timestampJitterMs.std", ×tamp_jitter_ms_stddev)) {
- metrics_proto.set_timestamp_jitter_stddev_millis(timestamp_jitter_ms_stddev);
- }
- // item->setDouble(MM_PREFIX "latencyMs.mean", mLatencyMs.getMean());
- double latency_ms_mean = -1;
- if (item->getDouble(MM_PREFIX "latencyMs.mean", &latency_ms_mean)) {
- metrics_proto.set_latency_mean_millis(latency_ms_mean);
- }
- // item->setDouble(MM_PREFIX "latencyMs.std", mLatencyMs.getStdDev());
- double latency_ms_stddev = -1;
- if (item->getDouble(MM_PREFIX "latencyMs.std", &latency_ms_stddev)) {
- metrics_proto.set_latency_stddev_millis(latency_ms_stddev);
+
+ std::string output_device;
+ if (item->getString(MM_PREFIX "outDevice", &output_device)) {
+ metrics_proto.set_output_device(output_device);
}
+ std::string input_device;
+ if (item->getString(MM_PREFIX "inDevice", &input_device)) {
+ metrics_proto.set_input_device(input_device);
+ }
+
+ double io_jitter_mean_millis = -1;
+ if (item->getDouble(MM_PREFIX "ioJitterMs.mean", &io_jitter_mean_millis)) {
+ metrics_proto.set_io_jitter_mean_millis(io_jitter_mean_millis);
+ }
+
+ double io_jitter_stddev_millis = -1;
+ if (item->getDouble(MM_PREFIX "ioJitterMs.std", &io_jitter_stddev_millis)) {
+ metrics_proto.set_io_jitter_stddev_millis(io_jitter_stddev_millis);
+ }
+
+ double process_time_mean_millis = -1;
+ if (item->getDouble(MM_PREFIX "processTimeMs.mean", &process_time_mean_millis)) {
+ metrics_proto.set_process_time_mean_millis(process_time_mean_millis);
+ }
+
+ double process_time_stddev_millis = -1;
+ if (item->getDouble(MM_PREFIX "processTimeMs.std", &process_time_stddev_millis)) {
+ metrics_proto.set_process_time_stddev_millis(process_time_stddev_millis);
+ }
+
+ double timestamp_jitter_mean_millis = -1;
+ if (item->getDouble(MM_PREFIX "timestampJitterMs.mean", ×tamp_jitter_mean_millis)) {
+ metrics_proto.set_timestamp_jitter_mean_millis(timestamp_jitter_mean_millis);
+ }
+
+ double timestamp_jitter_stddev_millis = -1;
+ if (item->getDouble(MM_PREFIX "timestampJitterMs.std", ×tamp_jitter_stddev_millis)) {
+ metrics_proto.set_timestamp_jitter_stddev_millis(timestamp_jitter_stddev_millis);
+ }
+
+ double latency_mean_millis = -1;
+ if (item->getDouble(MM_PREFIX "latencyMs.mean", &latency_mean_millis)) {
+ metrics_proto.set_latency_mean_millis(latency_mean_millis);
+ }
+
+ double latency_stddev_millis = -1;
+ if (item->getDouble(MM_PREFIX "latencyMs.std", &latency_stddev_millis)) {
+ metrics_proto.set_latency_stddev_millis(latency_stddev_millis);
+ }
std::string serialized;
if (!metrics_proto.SerializeToString(&serialized)) {
@@ -190,17 +188,50 @@
return false;
}
- if (enabled_statsd) {
- android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
- (void)android::util::stats_write(android::util::MEDIAMETRICS_AUDIOTHREAD_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- bf_serialized);
+ android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIOTHREAD_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ bf_serialized);
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_audiothread_reported:"
+ << android::util::MEDIAMETRICS_AUDIOTHREAD_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
- } else {
- ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
- }
+ << " type:" << type
+ << " framecount:" << framecount
+ << " samplerate:" << samplerate
+ << " work_millis_hist:" << work_millis_hist
+ << " latency_millis_hist:" << latency_millis_hist
+ << " warmup_millis_hist:" << warmup_millis_hist
+ << " underruns:" << underruns
+ << " overruns:" << overruns
+ << " active_millis:" << active_millis
+ << " duration_millis:" << duration_millis
+ << " id:" << id
+ << " port_id:" << port_id
+ << " sample_rate:" << sample_rate
+ << " channel_mask:" << channel_mask
+ << " encoding:" << encoding
+ << " frame_count:" << frame_count
+ << " output_device:" << output_device
+ << " input_device:" << input_device
+ << " io_jitter_mean_millis:" << io_jitter_mean_millis
+ << " io_jitter_stddev_millis:" << io_jitter_stddev_millis
+
+ << " process_time_mean_millis:" << process_time_mean_millis
+ << " process_time_stddev_millis:" << process_time_stddev_millis
+ << " timestamp_jitter_mean_millis:" << timestamp_jitter_mean_millis
+ << " timestamp_jitter_stddev_millis:" << timestamp_jitter_stddev_millis
+ << " latency_mean_millis:" << latency_mean_millis
+ << " latency_stddev_millis:" << latency_stddev_millis
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_AUDIOTHREAD_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_audiotrack.cpp b/services/mediametrics/statsd_audiotrack.cpp
index fd809c8..fe269a1 100644
--- a/services/mediametrics/statsd_audiotrack.cpp
+++ b/services/mediametrics/statsd_audiotrack.cpp
@@ -38,16 +38,16 @@
namespace android {
-bool statsd_audiotrack(const mediametrics::Item *item)
+bool statsd_audiotrack(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) return false;
// these go into the statsd wrapper
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
-
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
// the rest into our own proto
//
@@ -58,52 +58,52 @@
// static constexpr char kAudioTrackStreamType[] = "android.media.audiotrack.streamtype";
// optional string streamType;
- std::string streamtype;
- if (item->getString("android.media.audiotrack.streamtype", &streamtype)) {
- metrics_proto.set_stream_type(std::move(streamtype));
+ std::string stream_type;
+ if (item->getString("android.media.audiotrack.streamtype", &stream_type)) {
+ metrics_proto.set_stream_type(stream_type);
}
// static constexpr char kAudioTrackContentType[] = "android.media.audiotrack.type";
// optional string contentType;
- std::string contenttype;
- if (item->getString("android.media.audiotrack.type", &contenttype)) {
- metrics_proto.set_content_type(std::move(contenttype));
+ std::string content_type;
+ if (item->getString("android.media.audiotrack.type", &content_type)) {
+ metrics_proto.set_content_type(content_type);
}
// static constexpr char kAudioTrackUsage[] = "android.media.audiotrack.usage";
// optional string trackUsage;
- std::string trackusage;
- if (item->getString("android.media.audiotrack.usage", &trackusage)) {
- metrics_proto.set_track_usage(std::move(trackusage));
+ std::string track_usage;
+ if (item->getString("android.media.audiotrack.usage", &track_usage)) {
+ metrics_proto.set_track_usage(track_usage);
}
// static constexpr char kAudioTrackSampleRate[] = "android.media.audiotrack.samplerate";
// optional int32 samplerate;
- int32_t samplerate = -1;
- if (item->getInt32("android.media.audiotrack.samplerate", &samplerate)) {
- metrics_proto.set_sample_rate(samplerate);
+ int32_t sample_rate = -1;
+ if (item->getInt32("android.media.audiotrack.samplerate", &sample_rate)) {
+ metrics_proto.set_sample_rate(sample_rate);
}
// static constexpr char kAudioTrackChannelMask[] = "android.media.audiotrack.channelmask";
// optional int64 channelMask;
- int64_t channelMask = -1;
- if (item->getInt64("android.media.audiotrack.channelmask", &channelMask)) {
- metrics_proto.set_channel_mask(channelMask);
+ int64_t channel_mask = -1;
+ if (item->getInt64("android.media.audiotrack.channelmask", &channel_mask)) {
+ metrics_proto.set_channel_mask(channel_mask);
}
// NB: These are not yet exposed as public Java API constants.
// static constexpr char kAudioTrackUnderrunFrames[] = "android.media.audiotrack.underrunframes";
// optional int32 underrunframes;
- int32_t underrunframes = -1;
- if (item->getInt32("android.media.audiotrack.underrunframes", &underrunframes)) {
- metrics_proto.set_underrun_frames(underrunframes);
+ int32_t underrun_frames = -1;
+ if (item->getInt32("android.media.audiotrack.underrunframes", &underrun_frames)) {
+ metrics_proto.set_underrun_frames(underrun_frames);
}
// static constexpr char kAudioTrackStartupGlitch[] = "android.media.audiotrack.glitch.startup";
// optional int32 startupglitch;
- int32_t startupglitch = -1;
- if (item->getInt32("android.media.audiotrack.glitch.startup", &startupglitch)) {
- metrics_proto.set_startup_glitch(startupglitch);
+ int32_t startup_glitch = -1;
+ if (item->getInt32("android.media.audiotrack.glitch.startup", &startup_glitch)) {
+ metrics_proto.set_startup_glitch(startup_glitch);
}
// portId (int32)
@@ -114,7 +114,7 @@
// encoding (string)
std::string encoding;
if (item->getString("android.media.audiotrack.encoding", &encoding)) {
- metrics_proto.set_encoding(std::move(encoding));
+ metrics_proto.set_encoding(encoding);
}
// frameCount (int32)
int32_t frame_count = -1;
@@ -124,7 +124,7 @@
// attributes (string)
std::string attributes;
if (item->getString("android.media.audiotrack.attributes", &attributes)) {
- metrics_proto.set_attributes(std::move(attributes));
+ metrics_proto.set_attributes(attributes);
}
std::string serialized;
@@ -137,21 +137,40 @@
// log_session_id (string)
std::string logSessionId;
(void)item->getString("android.media.audiotrack.logSessionId", &logSessionId);
- const auto logSessionIdForStats =
+ const auto log_session_id =
mediametrics::stringutils::sanitizeLogSessionId(logSessionId);
- if (enabled_statsd) {
- android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
- (void)android::util::stats_write(android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- bf_serialized,
- logSessionIdForStats.c_str());
+ android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ bf_serialized,
+ log_session_id.c_str());
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_audiotrack_reported:"
+ << android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
- } else {
- ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
- }
+ << " stream_type:" << stream_type
+ << " content_type:" << content_type
+ << " track_usage:" << track_usage
+ << " sample_rate:" << sample_rate
+ << " channel_mask:" << channel_mask
+ << " underrun_frames:" << underrun_frames
+ << " startup_glitch:" << startup_glitch
+ << " port_id:" << port_id
+ << " encoding:" << encoding
+ << " frame_count:" << frame_count
+ << " attributes:" << attributes
+
+ << " log_session_id:" << log_session_id
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_codec.cpp b/services/mediametrics/statsd_codec.cpp
index 1c5ab77..381f441 100644
--- a/services/mediametrics/statsd_codec.cpp
+++ b/services/mediametrics/statsd_codec.cpp
@@ -33,64 +33,64 @@
#include "cleaner.h"
#include "MediaMetricsService.h"
-#include "frameworks/proto_logging/stats/enums/stats/mediametrics/mediametrics.pb.h"
+#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
namespace android {
-bool statsd_codec(const mediametrics::Item *item)
+bool statsd_codec(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) return false;
// these go into the statsd wrapper
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
-
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
// the rest into our own proto
//
- ::android::stats::mediametrics::CodecData metrics_proto;
+ ::android::stats::mediametrics_message::CodecData metrics_proto;
// flesh out the protobuf we'll hand off with our data
//
// android.media.mediacodec.codec string
std::string codec;
if (item->getString("android.media.mediacodec.codec", &codec)) {
- metrics_proto.set_codec(std::move(codec));
+ metrics_proto.set_codec(codec);
}
- // android.media.mediacodec.mime string
+
std::string mime;
if (item->getString("android.media.mediacodec.mime", &mime)) {
- metrics_proto.set_mime(std::move(mime));
+ metrics_proto.set_mime(mime);
}
- // android.media.mediacodec.mode string
+
std::string mode;
if ( item->getString("android.media.mediacodec.mode", &mode)) {
- metrics_proto.set_mode(std::move(mode));
+ metrics_proto.set_mode(mode);
}
- // android.media.mediacodec.encoder int32
+
int32_t encoder = -1;
if ( item->getInt32("android.media.mediacodec.encoder", &encoder)) {
metrics_proto.set_encoder(encoder);
}
- // android.media.mediacodec.secure int32
+
int32_t secure = -1;
if ( item->getInt32("android.media.mediacodec.secure", &secure)) {
metrics_proto.set_secure(secure);
}
- // android.media.mediacodec.width int32
+
int32_t width = -1;
if ( item->getInt32("android.media.mediacodec.width", &width)) {
metrics_proto.set_width(width);
}
- // android.media.mediacodec.height int32
+
int32_t height = -1;
if ( item->getInt32("android.media.mediacodec.height", &height)) {
metrics_proto.set_height(height);
}
- // android.media.mediacodec.rotation-degrees int32
+
int32_t rotation = -1;
if ( item->getInt32("android.media.mediacodec.rotation-degrees", &rotation)) {
metrics_proto.set_rotation(rotation);
@@ -100,90 +100,89 @@
if ( item->getInt32("android.media.mediacodec.crypto", &crypto)) {
metrics_proto.set_crypto(crypto);
}
- // android.media.mediacodec.profile int32
+
int32_t profile = -1;
if ( item->getInt32("android.media.mediacodec.profile", &profile)) {
metrics_proto.set_profile(profile);
}
- // android.media.mediacodec.level int32
+
int32_t level = -1;
if ( item->getInt32("android.media.mediacodec.level", &level)) {
metrics_proto.set_level(level);
}
- // android.media.mediacodec.maxwidth int32
- int32_t maxwidth = -1;
- if ( item->getInt32("android.media.mediacodec.maxwidth", &maxwidth)) {
- metrics_proto.set_max_width(maxwidth);
+
+ int32_t max_width = -1;
+ if ( item->getInt32("android.media.mediacodec.maxwidth", &max_width)) {
+ metrics_proto.set_max_width(max_width);
}
- // android.media.mediacodec.maxheight int32
- int32_t maxheight = -1;
- if ( item->getInt32("android.media.mediacodec.maxheight", &maxheight)) {
- metrics_proto.set_max_height(maxheight);
+
+ int32_t max_height = -1;
+ if ( item->getInt32("android.media.mediacodec.maxheight", &max_height)) {
+ metrics_proto.set_max_height(max_height);
}
- // android.media.mediacodec.errcode int32
- int32_t errcode = -1;
- if ( item->getInt32("android.media.mediacodec.errcode", &errcode)) {
- metrics_proto.set_error_code(errcode);
+
+ int32_t error_code = -1;
+ if ( item->getInt32("android.media.mediacodec.errcode", &error_code)) {
+ metrics_proto.set_error_code(error_code);
}
- // android.media.mediacodec.errstate string
- std::string errstate;
- if ( item->getString("android.media.mediacodec.errstate", &errstate)) {
- metrics_proto.set_error_state(std::move(errstate));
+
+ std::string error_state;
+ if ( item->getString("android.media.mediacodec.errstate", &error_state)) {
+ metrics_proto.set_error_state(error_state);
}
- // android.media.mediacodec.latency.max int64
+
int64_t latency_max = -1;
if ( item->getInt64("android.media.mediacodec.latency.max", &latency_max)) {
metrics_proto.set_latency_max(latency_max);
}
- // android.media.mediacodec.latency.min int64
+
int64_t latency_min = -1;
if ( item->getInt64("android.media.mediacodec.latency.min", &latency_min)) {
metrics_proto.set_latency_min(latency_min);
}
- // android.media.mediacodec.latency.avg int64
+
int64_t latency_avg = -1;
if ( item->getInt64("android.media.mediacodec.latency.avg", &latency_avg)) {
metrics_proto.set_latency_avg(latency_avg);
}
- // android.media.mediacodec.latency.n int64
+
int64_t latency_count = -1;
if ( item->getInt64("android.media.mediacodec.latency.n", &latency_count)) {
metrics_proto.set_latency_count(latency_count);
}
- // android.media.mediacodec.latency.unknown int64
+
int64_t latency_unknown = -1;
if ( item->getInt64("android.media.mediacodec.latency.unknown", &latency_unknown)) {
metrics_proto.set_latency_unknown(latency_unknown);
}
- // android.media.mediacodec.queueSecureInputBufferError int32
- if (int32_t queueSecureInputBufferError = -1;
- item->getInt32("android.media.mediacodec.queueSecureInputBufferError",
- &queueSecureInputBufferError)) {
- metrics_proto.set_queue_secure_input_buffer_error(queueSecureInputBufferError);
+
+ int32_t queue_secure_input_buffer_error = -1;
+ if (item->getInt32("android.media.mediacodec.queueSecureInputBufferError",
+ &queue_secure_input_buffer_error)) {
+ metrics_proto.set_queue_secure_input_buffer_error(queue_secure_input_buffer_error);
}
- // android.media.mediacodec.queueInputBufferError int32
- if (int32_t queueInputBufferError = -1;
- item->getInt32("android.media.mediacodec.queueInputBufferError",
- &queueInputBufferError)) {
- metrics_proto.set_queue_input_buffer_error(queueInputBufferError);
+
+ int32_t queue_input_buffer_error = -1;
+ if (item->getInt32("android.media.mediacodec.queueInputBufferError",
+ &queue_input_buffer_error)) {
+ metrics_proto.set_queue_input_buffer_error(queue_input_buffer_error);
}
// android.media.mediacodec.latency.hist NOT EMITTED
- // android.media.mediacodec.bitrate_mode string
std::string bitrate_mode;
if (item->getString("android.media.mediacodec.bitrate_mode", &bitrate_mode)) {
- metrics_proto.set_bitrate_mode(std::move(bitrate_mode));
+ metrics_proto.set_bitrate_mode(bitrate_mode);
}
- // android.media.mediacodec.bitrate int32
+
int32_t bitrate = -1;
if (item->getInt32("android.media.mediacodec.bitrate", &bitrate)) {
metrics_proto.set_bitrate(bitrate);
}
- // android.media.mediacodec.lifetimeMs int64
- int64_t lifetimeMs = -1;
- if ( item->getInt64("android.media.mediacodec.lifetimeMs", &lifetimeMs)) {
- lifetimeMs = mediametrics::bucket_time_minutes(lifetimeMs);
- metrics_proto.set_lifetime_millis(lifetimeMs);
+
+ int64_t lifetime_millis = -1;
+ if (item->getInt64("android.media.mediacodec.lifetimeMs", &lifetime_millis)) {
+ lifetime_millis = mediametrics::bucket_time_minutes(lifetime_millis);
+ metrics_proto.set_lifetime_millis(lifetime_millis);
}
// new for S; need to plumb through to westworld
@@ -201,18 +200,51 @@
ALOGE("Failed to serialize codec metrics");
return false;
}
+ android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_CODEC_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ bf_serialized);
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_codec_reported:"
+ << android::util::MEDIAMETRICS_CODEC_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
- if (enabled_statsd) {
- android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
- (void)android::util::stats_write(android::util::MEDIAMETRICS_CODEC_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- bf_serialized);
+ << " codec:" << codec
+ << " mime:" << mime
+ << " mode:" << mode
+ << " encoder:" << encoder
+ << " secure:" << secure
+ << " width:" << width
+ << " height:" << height
+ << " rotation:" << rotation
+ << " crypto:" << crypto
+ << " profile:" << profile
- } else {
- ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
- }
+ << " level:" << level
+ << " max_width:" << max_width
+ << " max_height:" << max_height
+ << " error_code:" << error_code
+ << " error_state:" << error_state
+ << " latency_max:" << latency_max
+ << " latency_min:" << latency_min
+ << " latency_avg:" << latency_avg
+ << " latency_count:" << latency_count
+ << " latency_unknown:" << latency_unknown
+ << " queue_input_buffer_error:" << queue_input_buffer_error
+ << " queue_secure_input_buffer_error:" << queue_secure_input_buffer_error
+ << " bitrate_mode:" << bitrate_mode
+ << " bitrate:" << bitrate
+ << " lifetime_millis:" << lifetime_millis
+ // TODO: add when log_session_id is merged.
+ // << " log_session_id:" << log_session_id
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_CODEC_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_drm.cpp b/services/mediametrics/statsd_drm.cpp
index 071c549..73b8872 100644
--- a/services/mediametrics/statsd_drm.cpp
+++ b/services/mediametrics/statsd_drm.cpp
@@ -32,6 +32,7 @@
#include <pwd.h>
#include "MediaMetricsService.h"
+#include "StringUtils.h"
#include "iface_statsd.h"
#include <statslog.h>
@@ -43,53 +44,60 @@
namespace android {
// mediadrm
-bool statsd_mediadrm(const mediametrics::Item *item)
+bool statsd_mediadrm(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) return false;
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
std::string vendor;
(void) item->getString("vendor", &vendor);
std::string description;
(void) item->getString("description", &description);
- if (enabled_statsd) {
- // This field is left here for backward compatibility.
- // This field is not used anymore.
- const std::string kUnusedField("unused");
- android::util::BytesField bf_serialized(kUnusedField.c_str(), kUnusedField.size());
- android::util::stats_write(android::util::MEDIAMETRICS_MEDIADRM_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- vendor.c_str(),
- description.c_str(),
- bf_serialized);
- } else {
- ALOGV("NOT sending: mediadrm data(%s, %s)", vendor.c_str(), description.c_str());
- }
+ // This field is left here for backward compatibility.
+ // This field is not used anymore.
+ const std::string kUnusedField("unused");
+ android::util::BytesField bf_serialized(kUnusedField.c_str(), kUnusedField.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_MEDIADRM_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ vendor.c_str(),
+ description.c_str(),
+ bf_serialized);
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_mediadrm_reported:"
+ << android::util::MEDIAMETRICS_MEDIADRM_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
+
+ << " vendor:" << vendor
+ << " description:" << description
+ // omitting serialized
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_MEDIADRM_REPORTED, log.str());
return true;
}
// drmmanager
-bool statsd_drmmanager(const mediametrics::Item *item)
+bool statsd_drmmanager(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
using namespace std::string_literals;
if (item == nullptr) return false;
- if (!enabled_statsd) {
- ALOGV("NOT sending: drmmanager data");
- return true;
- }
-
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
std::string plugin_id;
(void) item->getString("plugin_id", &plugin_id);
@@ -107,8 +115,9 @@
item->getInt64(("method"s + std::to_string(i)).c_str(), &methodCounts[i]);
}
- android::util::stats_write(android::util::MEDIAMETRICS_DRMMANAGER_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode, mediaApexVersion,
+ const int result = android::util::stats_write(android::util::MEDIAMETRICS_DRMMANAGER_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
plugin_id.c_str(), description.c_str(),
method_id, mime_types.c_str(),
methodCounts[0], methodCounts[1], methodCounts[2],
@@ -117,6 +126,25 @@
methodCounts[9], methodCounts[10], methodCounts[11],
methodCounts[12]);
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_drmmanager_reported:"
+ << android::util::MEDIAMETRICS_DRMMANAGER_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
+
+ << " plugin_id:" << plugin_id
+ << " description:" << description
+ << " method_id:" << method_id
+ << " mime_types:" << mime_types;
+
+ for (size_t i = 0; i < methodCounts.size(); ++i) {
+ log << " method_" << i << ":" << methodCounts[i];
+ }
+ log << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_DRMMANAGER_REPORTED, log.str());
return true;
}
@@ -144,17 +172,14 @@
} // namespace
// |out| and its contents are memory-managed by statsd.
-bool statsd_mediadrm_puller(const mediametrics::Item* item, AStatsEventList* out)
+bool statsd_mediadrm_puller(
+ const std::shared_ptr<const mediametrics::Item>& item, AStatsEventList* out,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) {
return false;
}
- if (!enabled_statsd) {
- ALOGV("NOT pulling: mediadrm activity");
- return true;
- }
-
std::string serialized_metrics;
(void) item->getString("serialized_metrics", &serialized_metrics);
const auto framework_raw(base64DecodeNoPad(serialized_metrics));
@@ -163,6 +188,11 @@
(void) item->getString("plugin_metrics", &plugin_metrics);
const auto plugin_raw(base64DecodeNoPad(plugin_metrics));
+ if (serialized_metrics.size() == 0 && plugin_metrics.size() == 0) {
+ ALOGD("statsd_mediadrm_puller skipping empty entry");
+ return false;
+ }
+
std::string vendor;
(void) item->getString("vendor", &vendor);
std::string description;
@@ -178,6 +208,19 @@
AStatsEvent_writeByteArray(event, framework_raw.data(), framework_raw.size());
AStatsEvent_writeByteArray(event, plugin_raw.data(), plugin_raw.size());
AStatsEvent_build(event);
+
+ std::stringstream log;
+ log << "pulled:" << " {"
+ << " media_drm_activity_info:"
+ << android::util::MEDIA_DRM_ACTIVITY_INFO
+ << " package_name:" << item->getPkgName()
+ << " package_version_code:" << item->getPkgVersionCode()
+ << " vendor:" << vendor
+ << " description:" << description
+ << " framework_metrics:" << mediametrics::stringutils::bytesToString(framework_raw, 8)
+ << " vendor_metrics:" << mediametrics::stringutils::bytesToString(plugin_raw, 8)
+ << " }";
+ statsdLog->log(android::util::MEDIA_DRM_ACTIVITY_INFO, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_extractor.cpp b/services/mediametrics/statsd_extractor.cpp
index 4180e0c..e228f07 100644
--- a/services/mediametrics/statsd_extractor.cpp
+++ b/services/mediametrics/statsd_extractor.cpp
@@ -37,16 +37,16 @@
namespace android {
-bool statsd_extractor(const mediametrics::Item *item)
+bool statsd_extractor(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) return false;
// these go into the statsd wrapper
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
-
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
// the rest into our own proto
//
@@ -55,26 +55,25 @@
// flesh out the protobuf we'll hand off with our data
//
- // android.media.mediaextractor.fmt string
- std::string fmt;
- if (item->getString("android.media.mediaextractor.fmt", &fmt)) {
- metrics_proto.set_format(std::move(fmt));
- }
- // android.media.mediaextractor.mime string
- std::string mime;
- if (item->getString("android.media.mediaextractor.mime", &mime)) {
- metrics_proto.set_mime(std::move(mime));
- }
- // android.media.mediaextractor.ntrk int32
- int32_t ntrk = -1;
- if (item->getInt32("android.media.mediaextractor.ntrk", &ntrk)) {
- metrics_proto.set_tracks(ntrk);
+ std::string format;
+ if (item->getString("android.media.mediaextractor.fmt", &format)) {
+ metrics_proto.set_format(format);
}
- // android.media.mediaextractor.entry string
+ std::string mime;
+ if (item->getString("android.media.mediaextractor.mime", &mime)) {
+ metrics_proto.set_mime(mime);
+ }
+
+ int32_t tracks = -1;
+ if (item->getInt32("android.media.mediaextractor.ntrk", &tracks)) {
+ metrics_proto.set_tracks(tracks);
+ }
+
std::string entry_point_string;
+ stats::mediametrics::ExtractorData::EntryPoint entry_point =
+ stats::mediametrics::ExtractorData_EntryPoint_OTHER;
if (item->getString("android.media.mediaextractor.entry", &entry_point_string)) {
- stats::mediametrics::ExtractorData::EntryPoint entry_point;
if (entry_point_string == "sdk") {
entry_point = stats::mediametrics::ExtractorData_EntryPoint_SDK;
} else if (entry_point_string == "ndk-with-jvm") {
@@ -93,17 +92,30 @@
return false;
}
- if (enabled_statsd) {
- android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
- (void)android::util::stats_write(android::util::MEDIAMETRICS_EXTRACTOR_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- bf_serialized);
+ android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_EXTRACTOR_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ bf_serialized);
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_extractor_reported:"
+ << android::util::MEDIAMETRICS_EXTRACTOR_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
- } else {
- ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
- }
+ << " format:" << format
+ << " mime:" << mime
+ << " tracks:" << tracks
+ << " entry_point:" << entry_point_string << "(" << entry_point << ")"
+ // TODO: Add MediaExtractor log_session_id
+ // << " log_session_id:" << log_session_id
+
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_EXTRACTOR_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_mediaparser.cpp b/services/mediametrics/statsd_mediaparser.cpp
index 262b2ae..f543425 100644
--- a/services/mediametrics/statsd_mediaparser.cpp
+++ b/services/mediametrics/statsd_mediaparser.cpp
@@ -36,16 +36,15 @@
namespace android {
-bool statsd_mediaparser(const mediametrics::Item *item)
+bool statsd_mediaparser(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
- if (item == nullptr) {
- return false;
- }
+ static constexpr bool enabled_statsd = true; // TODO: Remove, dup with dump2StatsdInternal().
+ if (item == nullptr) return false;
- // statsd wrapper data.
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
std::string parserName;
item->getString("android.media.mediaparser.parserName", &parserName);
@@ -82,9 +81,9 @@
if (enabled_statsd) {
(void) android::util::stats_write(android::util::MEDIAMETRICS_MEDIAPARSER_REPORTED,
- timestamp,
- pkgName.c_str(),
- pkgVersionCode,
+ timestamp_nanos,
+ package_name.c_str(),
+ package_version_code,
parserName.c_str(),
createdByName,
parserPool.c_str(),
@@ -99,7 +98,29 @@
} else {
ALOGV("NOT sending MediaParser media metrics.");
}
-
+ // TODO: Cleanup after playback_id is merged.
+ std::stringstream log;
+ log << "result:" << "(result)" << " {"
+ << " mediametrics_mediaparser_reported:"
+ << android::util::MEDIAMETRICS_MEDIAPARSER_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " parser_name:" << parserName
+ << " created_by_name:" << createdByName
+ << " parser_pool:" << parserPool
+ << " last_exception:" << lastException
+ << " resource_byte_count:" << resourceByteCount
+ << " duration_millis:" << durationMillis
+ << " track_mime_types:" << trackMimeTypes
+ << " track_codecs:" << trackCodecs
+ << " altered_parameters:" << alteredParameters
+ << " video_width:" << videoWidth
+ << " video_height:" << videoHeight
+ // TODO: Add MediaParser playback_id
+ // << " playback_id:" << playbackId
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_MEDIAPARSER_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_nuplayer.cpp b/services/mediametrics/statsd_nuplayer.cpp
index a8d0f55..33da81e 100644
--- a/services/mediametrics/statsd_nuplayer.cpp
+++ b/services/mediametrics/statsd_nuplayer.cpp
@@ -41,16 +41,16 @@
* handles nuplayer AND nuplayer2
* checks for the union of what the two players generate
*/
-bool statsd_nuplayer(const mediametrics::Item *item)
+bool statsd_nuplayer(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) return false;
// these go into the statsd wrapper
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
-
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
// the rest into our own proto
//
@@ -60,15 +60,16 @@
//
// differentiate between nuplayer and nuplayer2
- metrics_proto.set_whichplayer(item->getKey().c_str());
+ std::string whichPlayer = item->getKey();
+ metrics_proto.set_whichplayer(whichPlayer.c_str());
std::string video_mime;
if (item->getString("android.media.mediaplayer.video.mime", &video_mime)) {
- metrics_proto.set_video_mime(std::move(video_mime));
+ metrics_proto.set_video_mime(video_mime);
}
std::string video_codec;
if (item->getString("android.media.mediaplayer.video.codec", &video_codec)) {
- metrics_proto.set_video_codec(std::move(video_codec));
+ metrics_proto.set_video_codec(video_codec);
}
int32_t width = -1;
@@ -92,32 +93,32 @@
if (item->getInt64("android.media.mediaplayer.startupdropped", &frames_dropped_startup)) {
metrics_proto.set_frames_dropped_startup(frames_dropped_startup);
}
- double fps = -1.0;
- if (item->getDouble("android.media.mediaplayer.fps", &fps)) {
- metrics_proto.set_framerate(fps);
+ double framerate = -1.0;
+ if (item->getDouble("android.media.mediaplayer.fps", &framerate)) {
+ metrics_proto.set_framerate(framerate);
}
std::string audio_mime;
if (item->getString("android.media.mediaplayer.audio.mime", &audio_mime)) {
- metrics_proto.set_audio_mime(std::move(audio_mime));
+ metrics_proto.set_audio_mime(audio_mime);
}
std::string audio_codec;
if (item->getString("android.media.mediaplayer.audio.codec", &audio_codec)) {
- metrics_proto.set_audio_codec(std::move(audio_codec));
+ metrics_proto.set_audio_codec(audio_codec);
}
- int64_t duration_ms = -1;
- if (item->getInt64("android.media.mediaplayer.durationMs", &duration_ms)) {
- metrics_proto.set_duration_millis(duration_ms);
+ int64_t duration_millis = -1;
+ if (item->getInt64("android.media.mediaplayer.durationMs", &duration_millis)) {
+ metrics_proto.set_duration_millis(duration_millis);
}
- int64_t playing_ms = -1;
- if (item->getInt64("android.media.mediaplayer.playingMs", &playing_ms)) {
- metrics_proto.set_playing_millis(playing_ms);
+ int64_t playing_millis = -1;
+ if (item->getInt64("android.media.mediaplayer.playingMs", &playing_millis)) {
+ metrics_proto.set_playing_millis(playing_millis);
}
- int32_t err = -1;
- if (item->getInt32("android.media.mediaplayer.err", &err)) {
- metrics_proto.set_error(err);
+ int32_t error = -1;
+ if (item->getInt32("android.media.mediaplayer.err", &error)) {
+ metrics_proto.set_error(error);
}
int32_t error_code = -1;
if (item->getInt32("android.media.mediaplayer.errcode", &error_code)) {
@@ -125,45 +126,74 @@
}
std::string error_state;
if (item->getString("android.media.mediaplayer.errstate", &error_state)) {
- metrics_proto.set_error_state(std::move(error_state));
+ metrics_proto.set_error_state(error_state);
}
std::string data_source_type;
if (item->getString("android.media.mediaplayer.dataSource", &data_source_type)) {
- metrics_proto.set_data_source_type(std::move(data_source_type));
+ metrics_proto.set_data_source_type(data_source_type);
}
- int64_t rebufferingMs = -1;
- if (item->getInt64("android.media.mediaplayer.rebufferingMs", &rebufferingMs)) {
- metrics_proto.set_rebuffering_millis(rebufferingMs);
+ int64_t rebuffering_millis = -1;
+ if (item->getInt64("android.media.mediaplayer.rebufferingMs", &rebuffering_millis)) {
+ metrics_proto.set_rebuffering_millis(rebuffering_millis);
}
int32_t rebuffers = -1;
if (item->getInt32("android.media.mediaplayer.rebuffers", &rebuffers)) {
metrics_proto.set_rebuffers(rebuffers);
}
- int32_t rebufferExit = -1;
- if (item->getInt32("android.media.mediaplayer.rebufferExit", &rebufferExit)) {
- metrics_proto.set_rebuffer_at_exit(rebufferExit);
+ int32_t rebuffer_at_exit = -1;
+ if (item->getInt32("android.media.mediaplayer.rebufferExit", &rebuffer_at_exit)) {
+ metrics_proto.set_rebuffer_at_exit(rebuffer_at_exit);
}
-
std::string serialized;
if (!metrics_proto.SerializeToString(&serialized)) {
ALOGE("Failed to serialize nuplayer metrics");
return false;
}
- if (enabled_statsd) {
- android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
- (void)android::util::stats_write(android::util::MEDIAMETRICS_NUPLAYER_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- bf_serialized);
+ android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_NUPLAYER_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ bf_serialized);
- } else {
- ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
- }
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_nuplayer_reported:"
+ << android::util::MEDIAMETRICS_NUPLAYER_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
+ << " whichPlayer:" << whichPlayer
+ << " video_mime:" << video_mime
+ << " video_codec:" << video_codec
+ << " width:" << width
+ << " height:" << height
+ << " frames:" << frames
+ << " frames_dropped:" << frames_dropped
+ << " framerate:" << framerate
+ << " audio_mime:" << audio_mime
+ << " audio_codec:" << media_apex_version
+
+ << " duration_millis:" << duration_millis
+ << " playing_millis:" << playing_millis
+ << " error:" << error
+ << " error_code:" << error_code
+ << " error_state:" << error_state
+ << " data_source_type:" << data_source_type
+ << " rebuffering_millis:" << rebuffering_millis
+ << " rebuffers:" << rebuffers
+ << " rebuffer_at_exit:" << rebuffer_at_exit
+ << " frames_dropped_startup:" << frames_dropped_startup
+
+ // TODO NuPlayer - add log_session_id
+ // << " log_session_id:" << log_session_id
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_NUPLAYER_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/statsd_recorder.cpp b/services/mediametrics/statsd_recorder.cpp
index 2e5ada4..23b884f 100644
--- a/services/mediametrics/statsd_recorder.cpp
+++ b/services/mediametrics/statsd_recorder.cpp
@@ -37,16 +37,16 @@
namespace android {
-bool statsd_recorder(const mediametrics::Item *item)
+bool statsd_recorder(const std::shared_ptr<const mediametrics::Item>& item,
+ const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
if (item == nullptr) return false;
// these go into the statsd wrapper
- const nsecs_t timestamp = MediaMetricsService::roundTime(item->getTimestamp());
- std::string pkgName = item->getPkgName();
- int64_t pkgVersionCode = item->getPkgVersionCode();
- int64_t mediaApexVersion = 0;
-
+ const nsecs_t timestamp_nanos = MediaMetricsService::roundTime(item->getTimestamp());
+ const std::string package_name = item->getPkgName();
+ const int64_t package_version_code = item->getPkgVersionCode();
+ const int64_t media_apex_version = 0;
// the rest into our own proto
//
@@ -58,22 +58,22 @@
// string kRecorderAudioMime = "android.media.mediarecorder.audio.mime";
std::string audio_mime;
if (item->getString("android.media.mediarecorder.audio.mime", &audio_mime)) {
- metrics_proto.set_audio_mime(std::move(audio_mime));
+ metrics_proto.set_audio_mime(audio_mime);
}
// string kRecorderVideoMime = "android.media.mediarecorder.video.mime";
std::string video_mime;
if (item->getString("android.media.mediarecorder.video.mime", &video_mime)) {
- metrics_proto.set_video_mime(std::move(video_mime));
+ metrics_proto.set_video_mime(video_mime);
}
// int32 kRecorderVideoProfile = "android.media.mediarecorder.video-encoder-profile";
- int32_t videoProfile = -1;
- if (item->getInt32("android.media.mediarecorder.video-encoder-profile", &videoProfile)) {
- metrics_proto.set_video_profile(videoProfile);
+ int32_t video_profile = -1;
+ if (item->getInt32("android.media.mediarecorder.video-encoder-profile", &video_profile)) {
+ metrics_proto.set_video_profile(video_profile);
}
// int32 kRecorderVideoLevel = "android.media.mediarecorder.video-encoder-level";
- int32_t videoLevel = -1;
- if (item->getInt32("android.media.mediarecorder.video-encoder-level", &videoLevel)) {
- metrics_proto.set_video_level(videoLevel);
+ int32_t video_level = -1;
+ if (item->getInt32("android.media.mediarecorder.video-encoder-level", &video_level)) {
+ metrics_proto.set_video_level(video_level);
}
// int32 kRecorderWidth = "android.media.mediarecorder.width";
int32_t width = -1;
@@ -97,73 +97,73 @@
}
// int32 kRecorderCaptureFps = "android.media.mediarecorder.capture-fps";
- int32_t captureFps = -1;
- if (item->getInt32("android.media.mediarecorder.capture-fps", &captureFps)) {
- metrics_proto.set_capture_fps(captureFps);
+ int32_t capture_fps = -1;
+ if (item->getInt32("android.media.mediarecorder.capture-fps", &capture_fps)) {
+ metrics_proto.set_capture_fps(capture_fps);
}
// double kRecorderCaptureFpsEnable = "android.media.mediarecorder.capture-fpsenable";
- double captureFpsEnable = -1;
- if (item->getDouble("android.media.mediarecorder.capture-fpsenable", &captureFpsEnable)) {
- metrics_proto.set_capture_fps_enable(captureFpsEnable);
+ double capture_fps_enable = -1;
+ if (item->getDouble("android.media.mediarecorder.capture-fpsenable", &capture_fps_enable)) {
+ metrics_proto.set_capture_fps_enable(capture_fps_enable);
}
// int64 kRecorderDurationMs = "android.media.mediarecorder.durationMs";
- int64_t durationMs = -1;
- if (item->getInt64("android.media.mediarecorder.durationMs", &durationMs)) {
- metrics_proto.set_duration_millis(durationMs);
+ int64_t duration_millis = -1;
+ if (item->getInt64("android.media.mediarecorder.durationMs", &duration_millis)) {
+ metrics_proto.set_duration_millis(duration_millis);
}
// int64 kRecorderPaused = "android.media.mediarecorder.pausedMs";
- int64_t pausedMs = -1;
- if (item->getInt64("android.media.mediarecorder.pausedMs", &pausedMs)) {
- metrics_proto.set_paused_millis(pausedMs);
+ int64_t paused_millis = -1;
+ if (item->getInt64("android.media.mediarecorder.pausedMs", &paused_millis)) {
+ metrics_proto.set_paused_millis(paused_millis);
}
// int32 kRecorderNumPauses = "android.media.mediarecorder.NPauses";
- int32_t pausedCount = -1;
- if (item->getInt32("android.media.mediarecorder.NPauses", &pausedCount)) {
- metrics_proto.set_paused_count(pausedCount);
+ int32_t paused_count = -1;
+ if (item->getInt32("android.media.mediarecorder.NPauses", &paused_count)) {
+ metrics_proto.set_paused_count(paused_count);
}
// int32 kRecorderAudioBitrate = "android.media.mediarecorder.audio-bitrate";
- int32_t audioBitrate = -1;
- if (item->getInt32("android.media.mediarecorder.audio-bitrate", &audioBitrate)) {
- metrics_proto.set_audio_bitrate(audioBitrate);
+ int32_t audio_bitrate = -1;
+ if (item->getInt32("android.media.mediarecorder.audio-bitrate", &audio_bitrate)) {
+ metrics_proto.set_audio_bitrate(audio_bitrate);
}
// int32 kRecorderAudioChannels = "android.media.mediarecorder.audio-channels";
- int32_t audioChannels = -1;
- if (item->getInt32("android.media.mediarecorder.audio-channels", &audioChannels)) {
- metrics_proto.set_audio_channels(audioChannels);
+ int32_t audio_channels = -1;
+ if (item->getInt32("android.media.mediarecorder.audio-channels", &audio_channels)) {
+ metrics_proto.set_audio_channels(audio_channels);
}
// int32 kRecorderAudioSampleRate = "android.media.mediarecorder.audio-samplerate";
- int32_t audioSampleRate = -1;
- if (item->getInt32("android.media.mediarecorder.audio-samplerate", &audioSampleRate)) {
- metrics_proto.set_audio_samplerate(audioSampleRate);
+ int32_t audio_samplerate = -1;
+ if (item->getInt32("android.media.mediarecorder.audio-samplerate", &audio_samplerate)) {
+ metrics_proto.set_audio_samplerate(audio_samplerate);
}
// int32 kRecorderMovieTimescale = "android.media.mediarecorder.movie-timescale";
- int32_t movieTimescale = -1;
- if (item->getInt32("android.media.mediarecorder.movie-timescale", &movieTimescale)) {
- metrics_proto.set_movie_timescale(movieTimescale);
+ int32_t movie_timescale = -1;
+ if (item->getInt32("android.media.mediarecorder.movie-timescale", &movie_timescale)) {
+ metrics_proto.set_movie_timescale(movie_timescale);
}
// int32 kRecorderAudioTimescale = "android.media.mediarecorder.audio-timescale";
- int32_t audioTimescale = -1;
- if (item->getInt32("android.media.mediarecorder.audio-timescale", &audioTimescale)) {
- metrics_proto.set_audio_timescale(audioTimescale);
+ int32_t audio_timescale = -1;
+ if (item->getInt32("android.media.mediarecorder.audio-timescale", &audio_timescale)) {
+ metrics_proto.set_audio_timescale(audio_timescale);
}
// int32 kRecorderVideoTimescale = "android.media.mediarecorder.video-timescale";
- int32_t videoTimescale = -1;
- if (item->getInt32("android.media.mediarecorder.video-timescale", &videoTimescale)) {
- metrics_proto.set_video_timescale(videoTimescale);
+ int32_t video_timescale = -1;
+ if (item->getInt32("android.media.mediarecorder.video-timescale", &video_timescale)) {
+ metrics_proto.set_video_timescale(video_timescale);
}
// int32 kRecorderVideoBitrate = "android.media.mediarecorder.video-bitrate";
- int32_t videoBitRate = -1;
- if (item->getInt32("android.media.mediarecorder.video-bitrate", &videoBitRate)) {
- metrics_proto.set_video_bitrate(videoBitRate);
+ int32_t video_bitrate = -1;
+ if (item->getInt32("android.media.mediarecorder.video-bitrate", &video_bitrate)) {
+ metrics_proto.set_video_bitrate(video_bitrate);
}
// int32 kRecorderVideoIframeInterval = "android.media.mediarecorder.video-iframe-interval";
- int32_t iFrameInterval = -1;
- if (item->getInt32("android.media.mediarecorder.video-iframe-interval", &iFrameInterval)) {
- metrics_proto.set_iframe_interval(iFrameInterval);
+ int32_t iframe_interval = -1;
+ if (item->getInt32("android.media.mediarecorder.video-iframe-interval", &iframe_interval)) {
+ metrics_proto.set_iframe_interval(iframe_interval);
}
std::string serialized;
@@ -172,17 +172,47 @@
return false;
}
- if (enabled_statsd) {
- android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
- (void)android::util::stats_write(android::util::MEDIAMETRICS_RECORDER_REPORTED,
- timestamp, pkgName.c_str(), pkgVersionCode,
- mediaApexVersion,
- bf_serialized);
+ android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
+ int result = android::util::stats_write(android::util::MEDIAMETRICS_RECORDER_REPORTED,
+ timestamp_nanos, package_name.c_str(), package_version_code,
+ media_apex_version,
+ bf_serialized);
+ std::stringstream log;
+ log << "result:" << result << " {"
+ << " mediametrics_recorder_reported:"
+ << android::util::MEDIAMETRICS_RECORDER_REPORTED
+ << " timestamp_nanos:" << timestamp_nanos
+ << " package_name:" << package_name
+ << " package_version_code:" << package_version_code
+ << " media_apex_version:" << media_apex_version
- } else {
- ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
- }
+ << " audio_mime:" << audio_mime
+ << " video_mime:" << video_mime
+ << " video_profile:" << video_profile
+ << " video_level:" << video_level
+ << " width:" << width
+ << " height:" << height
+ << " rotation:" << rotation
+ << " framerate:" << framerate
+ << " capture_fps:" << capture_fps
+ << " capture_fps_enable:" << capture_fps_enable
+ << " duration_millis:" << duration_millis
+ << " paused_millis:" << paused_millis
+ << " paused_count:" << paused_count
+ << " audio_bitrate:" << audio_bitrate
+ << " audio_channels:" << audio_channels
+ << " audio_samplerate:" << audio_samplerate
+ << " movie_timescale:" << movie_timescale
+ << " audio_timescale:" << audio_timescale
+ << " video_timescale:" << video_timescale
+ << " video_bitrate:" << video_bitrate
+
+ << " iframe_interval:" << iframe_interval
+ // TODO Recorder - add log_session_id
+ // << " log_session_id:" << log_session_id
+ << " }";
+ statsdLog->log(android::util::MEDIAMETRICS_RECORDER_REPORTED, log.str());
return true;
}
diff --git a/services/mediametrics/tests/mediametrics_tests.cpp b/services/mediametrics/tests/mediametrics_tests.cpp
index ac9c7fa..2336d6f 100644
--- a/services/mediametrics/tests/mediametrics_tests.cpp
+++ b/services/mediametrics/tests/mediametrics_tests.cpp
@@ -809,7 +809,9 @@
(*item3).set("four", (int32_t)4)
.setTimestamp(12);
- android::mediametrics::AudioAnalytics audioAnalytics;
+ std::shared_ptr<mediametrics::StatsdLog> statsdLog =
+ std::make_shared<mediametrics::StatsdLog>(10);
+ android::mediametrics::AudioAnalytics audioAnalytics{statsdLog};
// untrusted entities cannot create a new key.
ASSERT_EQ(PERMISSION_DENIED, audioAnalytics.submit(item, false /* isTrusted */));
@@ -817,7 +819,7 @@
// TODO: Verify contents of AudioAnalytics.
// Currently there is no getter API in AudioAnalytics besides dump.
- ASSERT_EQ(11, audioAnalytics.dump(1000).second /* lines */);
+ ASSERT_EQ(10, audioAnalytics.dump(1000).second /* lines */);
ASSERT_EQ(NO_ERROR, audioAnalytics.submit(item, true /* isTrusted */));
// untrusted entities can add to an existing key
@@ -845,7 +847,9 @@
(*item3).set("four", (int32_t)4)
.setTimestamp(12);
- android::mediametrics::AudioAnalytics audioAnalytics;
+ std::shared_ptr<mediametrics::StatsdLog> statsdLog =
+ std::make_shared<mediametrics::StatsdLog>(10);
+ android::mediametrics::AudioAnalytics audioAnalytics{statsdLog};
// untrusted entities cannot create a new key.
ASSERT_EQ(PERMISSION_DENIED, audioAnalytics.submit(item, false /* isTrusted */));
@@ -853,7 +857,7 @@
// TODO: Verify contents of AudioAnalytics.
// Currently there is no getter API in AudioAnalytics besides dump.
- ASSERT_EQ(11, audioAnalytics.dump(1000).second /* lines */);
+ ASSERT_EQ(10, audioAnalytics.dump(1000).second /* lines */);
ASSERT_EQ(NO_ERROR, audioAnalytics.submit(item, true /* isTrusted */));
// untrusted entities can add to an existing key
@@ -877,7 +881,9 @@
(*item3).set("four", (int32_t)4)
.setTimestamp(12);
- android::mediametrics::AudioAnalytics audioAnalytics;
+ std::shared_ptr<mediametrics::StatsdLog> statsdLog =
+ std::make_shared<mediametrics::StatsdLog>(10);
+ android::mediametrics::AudioAnalytics audioAnalytics{statsdLog};
ASSERT_EQ(NO_ERROR, audioAnalytics.submit(item, true /* isTrusted */));
// untrusted entities can add to an existing key
diff --git a/services/mediaresourcemanager/Android.bp b/services/mediaresourcemanager/Android.bp
index 926de3e..db61061 100644
--- a/services/mediaresourcemanager/Android.bp
+++ b/services/mediaresourcemanager/Android.bp
@@ -74,6 +74,9 @@
"ResourceManagerService.cpp",
"ResourceObserverService.cpp",
"ServiceLog.cpp",
+
+ // TODO: convert to AIDL?
+ "IMediaResourceMonitor.cpp",
],
shared_libs: [
diff --git a/services/mediaresourcemanager/IMediaResourceMonitor.cpp b/services/mediaresourcemanager/IMediaResourceMonitor.cpp
new file mode 100644
index 0000000..42d7feb
--- /dev/null
+++ b/services/mediaresourcemanager/IMediaResourceMonitor.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IMediaResourceMonitor.h"
+#include <binder/Parcel.h>
+#include <utils/Errors.h>
+#include <sys/types.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------
+
+class BpMediaResourceMonitor : public BpInterface<IMediaResourceMonitor> {
+public:
+ explicit BpMediaResourceMonitor(const sp<IBinder>& impl)
+ : BpInterface<IMediaResourceMonitor>(impl) {}
+
+ virtual void notifyResourceGranted(/*in*/ int32_t pid, /*in*/ const int32_t type)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaResourceMonitor::getInterfaceDescriptor());
+ data.writeInt32(pid);
+ data.writeInt32(type);
+ remote()->transact(NOTIFY_RESOURCE_GRANTED, data, &reply, IBinder::FLAG_ONEWAY);
+ }
+};
+
+IMPLEMENT_META_INTERFACE(MediaResourceMonitor, "android.media.IMediaResourceMonitor")
+
+// ----------------------------------------------------------------------
+
+// NOLINTNEXTLINE(google-default-arguments)
+status_t BnMediaResourceMonitor::onTransact( uint32_t code, const Parcel& data, Parcel* reply,
+ uint32_t flags) {
+ switch(code) {
+ case NOTIFY_RESOURCE_GRANTED: {
+ CHECK_INTERFACE(IMediaResourceMonitor, data, reply);
+ int32_t pid = data.readInt32();
+ const int32_t type = data.readInt32();
+ notifyResourceGranted(/*in*/ pid, /*in*/ type);
+ return NO_ERROR;
+ } break;
+ default:
+ return BBinder::onTransact(code, data, reply, flags);
+ }
+}
+
+// ----------------------------------------------------------------------
+
+} // namespace android
diff --git a/services/mediaresourcemanager/IMediaResourceMonitor.h b/services/mediaresourcemanager/IMediaResourceMonitor.h
new file mode 100644
index 0000000..f92d557
--- /dev/null
+++ b/services/mediaresourcemanager/IMediaResourceMonitor.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#ifndef __ANDROID_VNDK__
+
+#include <binder/IInterface.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------
+
+class IMediaResourceMonitor : public IInterface {
+public:
+ DECLARE_META_INTERFACE(MediaResourceMonitor)
+
+ // Values should be in sync with Intent.EXTRA_MEDIA_RESOURCE_TYPE_XXX.
+ enum {
+ TYPE_VIDEO_CODEC = 0,
+ TYPE_AUDIO_CODEC = 1,
+ };
+
+ virtual void notifyResourceGranted(/*in*/ int32_t pid, /*in*/ const int32_t type) = 0;
+
+ enum {
+ NOTIFY_RESOURCE_GRANTED = IBinder::FIRST_CALL_TRANSACTION,
+ };
+};
+
+// ----------------------------------------------------------------------
+
+class BnMediaResourceMonitor : public BnInterface<IMediaResourceMonitor> {
+public:
+ // NOLINTNEXTLINE(google-default-arguments)
+ virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+ uint32_t flags = 0);
+};
+
+// ----------------------------------------------------------------------
+
+} // namespace android
+
+#else // __ANDROID_VNDK__
+#error "This header is not visible to vendors"
+#endif // __ANDROID_VNDK__
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 289cffd..953686b 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -21,7 +21,6 @@
#include <android/binder_manager.h>
#include <android/binder_process.h>
-#include <binder/IMediaResourceMonitor.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <cutils/sched_policy.h>
@@ -36,6 +35,7 @@
#include <sys/time.h>
#include <unistd.h>
+#include "IMediaResourceMonitor.h"
#include "ResourceManagerService.h"
#include "ResourceObserverService.h"
#include "ServiceLog.h"
diff --git a/services/mediatranscoding/tests/Android.bp b/services/mediatranscoding/tests/Android.bp
index 4df5a9f..cb180ec 100644
--- a/services/mediatranscoding/tests/Android.bp
+++ b/services/mediatranscoding/tests/Android.bp
@@ -25,6 +25,7 @@
],
shared_libs: [
+ "libactivitymanager_aidl",
"libbinder",
"libbinder_ndk",
"liblog",
diff --git a/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h b/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
index 3f7d8d6..20e4bfb 100644
--- a/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
+++ b/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
@@ -434,6 +434,34 @@
session.request.destinationFilePath == destinationFilePath));
}
+ template <bool expectation = success>
+ bool addClientUid(int32_t sessionId, uid_t clientUid) {
+ constexpr bool shouldSucceed = (expectation == success);
+ bool result;
+ Status status = mClient->addClientUid(sessionId, clientUid, &result);
+
+ EXPECT_TRUE(status.isOk());
+ EXPECT_EQ(result, shouldSucceed);
+
+ return status.isOk() && (result == shouldSucceed);
+ }
+
+ template <bool expectation = success>
+ bool getClientUids(int32_t sessionId, std::vector<int32_t>* clientUids) {
+ constexpr bool shouldSucceed = (expectation == success);
+ std::optional<std::vector<int32_t>> aidl_return;
+ Status status = mClient->getClientUids(sessionId, &aidl_return);
+
+ EXPECT_TRUE(status.isOk());
+ bool gotUids = (aidl_return != std::nullopt);
+ if (gotUids) {
+ *clientUids = *aidl_return;
+ }
+ EXPECT_EQ(gotUids, shouldSucceed);
+
+ return status.isOk() && (gotUids == shouldSucceed);
+ }
+
int32_t mClientId;
pid_t mClientPid;
uid_t mClientUid;
@@ -500,8 +528,24 @@
EXPECT_TRUE(mClient3->unregisterClient().isOk());
}
+ const char* prepareOutputFile(const char* path) {
+ deleteFile(path);
+ return path;
+ }
+
void deleteFile(const char* path) { unlink(path); }
+ void dismissKeyguard() {
+ EXPECT_TRUE(ShellHelper::RunCmd("input keyevent KEYCODE_WAKEUP"));
+ EXPECT_TRUE(ShellHelper::RunCmd("wm dismiss-keyguard"));
+ }
+
+ void stopAppPackages() {
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+ }
+
std::shared_ptr<IMediaTranscodingService> mService;
std::shared_ptr<TestClientCallback> mClient1;
std::shared_ptr<TestClientCallback> mClient2;
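
The addClientUid()/getClientUids() helpers above follow the fixture's expectation-template convention. A minimal self-contained sketch of that pattern, assuming the fixture defines boolean constants named success and fail (they are referenced but not shown in this hunk):

    #include <gtest/gtest.h>

    // Hypothetical, self-contained illustration of the <success>/<fail> helpers.
    static constexpr bool success = true;
    static constexpr bool fail = false;

    template <bool expectation = success>
    bool checkValue(bool actual) {
        constexpr bool shouldSucceed = (expectation == success);
        EXPECT_EQ(actual, shouldSucceed);
        return actual == shouldSucceed;
    }

    TEST(ExpectationTemplateExample, Basic) {
        EXPECT_TRUE(checkValue(true));         // default: expect success
        EXPECT_TRUE(checkValue<fail>(false));  // flipped: expect failure
    }
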
diff --git a/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp b/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp
index 0550d77..e9eebe2 100644
--- a/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp
+++ b/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp
@@ -33,7 +33,7 @@
namespace media {
-constexpr int64_t kPaddingUs = 400000;
+constexpr int64_t kPaddingUs = 1000000;
constexpr int64_t kSessionWithPaddingUs = 10000000 + kPaddingUs;
constexpr int32_t kBitRate = 8 * 1000 * 1000; // 8Mbs
@@ -56,8 +56,7 @@
registerMultipleClients();
const char* srcPath = "bad_file_uri";
- const char* dstPath = OUTPATH(TestInvalidSource);
- deleteFile(dstPath);
+ const char* dstPath = prepareOutputFile(OUTPATH(TestInvalidSource));
// Submit one session.
EXPECT_TRUE(
@@ -73,8 +72,7 @@
TEST_F(MediaTranscodingServiceRealTest, TestPassthru) {
registerMultipleClients();
- const char* dstPath = OUTPATH(TestPassthru);
- deleteFile(dstPath);
+ const char* dstPath = prepareOutputFile(OUTPATH(TestPassthru));
// Submit one session.
EXPECT_TRUE(mClient1->submit(0, kShortSrcPath, dstPath));
@@ -89,8 +87,7 @@
TEST_F(MediaTranscodingServiceRealTest, TestTranscodeVideo) {
registerMultipleClients();
- const char* dstPath = OUTPATH(TestTranscodeVideo);
- deleteFile(dstPath);
+ const char* dstPath = prepareOutputFile(OUTPATH(TestTranscodeVideo));
// Submit one session.
EXPECT_TRUE(mClient1->submit(0, kShortSrcPath, dstPath, TranscodingSessionPriority::kNormal,
@@ -106,8 +103,7 @@
TEST_F(MediaTranscodingServiceRealTest, TestTranscodeVideoProgress) {
registerMultipleClients();
- const char* dstPath = OUTPATH(TestTranscodeVideoProgress);
- deleteFile(dstPath);
+ const char* dstPath = prepareOutputFile(OUTPATH(TestTranscodeVideoProgress));
// Submit one session.
EXPECT_TRUE(mClient1->submit(0, kLongSrcPath, dstPath, TranscodingSessionPriority::kNormal,
@@ -134,11 +130,9 @@
const char* srcPath0 = kLongSrcPath;
const char* srcPath1 = kShortSrcPath;
- const char* dstPath0 = OUTPATH(TestCancelImmediately_Session0);
- const char* dstPath1 = OUTPATH(TestCancelImmediately_Session1);
+ const char* dstPath0 = prepareOutputFile(OUTPATH(TestCancelImmediately_Session0));
+ const char* dstPath1 = prepareOutputFile(OUTPATH(TestCancelImmediately_Session1));
- deleteFile(dstPath0);
- deleteFile(dstPath1);
// Submit one session, should start immediately.
EXPECT_TRUE(
mClient1->submit(0, srcPath0, dstPath0, TranscodingSessionPriority::kNormal, kBitRate));
@@ -166,11 +160,9 @@
const char* srcPath0 = kLongSrcPath;
const char* srcPath1 = kShortSrcPath;
- const char* dstPath0 = OUTPATH(TestCancelWhileRunning_Session0);
- const char* dstPath1 = OUTPATH(TestCancelWhileRunning_Session1);
+ const char* dstPath0 = prepareOutputFile(OUTPATH(TestCancelWhileRunning_Session0));
+ const char* dstPath1 = prepareOutputFile(OUTPATH(TestCancelWhileRunning_Session1));
- deleteFile(dstPath0);
- deleteFile(dstPath1);
// Submit two sessions, session 0 should start immediately, session 1 should be queued.
EXPECT_TRUE(
mClient1->submit(0, srcPath0, dstPath0, TranscodingSessionPriority::kNormal, kBitRate));
@@ -197,10 +189,8 @@
const char* srcPath0 = kLongSrcPath;
const char* srcPath1 = kShortSrcPath;
- const char* dstPath0 = OUTPATH(TestPauseResumeSingleClient_Session0);
- const char* dstPath1 = OUTPATH(TestPauseResumeSingleClient_Session1);
- deleteFile(dstPath0);
- deleteFile(dstPath1);
+ const char* dstPath0 = prepareOutputFile(OUTPATH(TestPauseResumeSingleClient_Session0));
+ const char* dstPath1 = prepareOutputFile(OUTPATH(TestPauseResumeSingleClient_Session1));
// Submit one offline session, should start immediately.
EXPECT_TRUE(mClient1->submit(0, srcPath0, dstPath0, TranscodingSessionPriority::kUnspecified,
@@ -244,20 +234,15 @@
TEST_F(MediaTranscodingServiceRealTest, TestPauseResumeMultiClients) {
ALOGD("TestPauseResumeMultiClients starting...");
- EXPECT_TRUE(ShellHelper::RunCmd("input keyevent KEYCODE_WAKEUP"));
- EXPECT_TRUE(ShellHelper::RunCmd("wm dismiss-keyguard"));
- EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
- EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
- EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+ dismissKeyguard();
+ stopAppPackages();
registerMultipleClients();
const char* srcPath0 = kLongSrcPath;
const char* srcPath1 = kShortSrcPath;
- const char* dstPath0 = OUTPATH(TestPauseResumeMultiClients_Client0);
- const char* dstPath1 = OUTPATH(TestPauseResumeMultiClients_Client1);
- deleteFile(dstPath0);
- deleteFile(dstPath1);
+ const char* dstPath0 = prepareOutputFile(OUTPATH(TestPauseResumeMultiClients_Client0));
+ const char* dstPath1 = prepareOutputFile(OUTPATH(TestPauseResumeMultiClients_Client1));
ALOGD("Moving app A to top...");
EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
@@ -294,12 +279,177 @@
unregisterMultipleClients();
- EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
- EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
- EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+ stopAppPackages();
ALOGD("TestPauseResumeMultiClients finished.");
}
+TEST_F(MediaTranscodingServiceRealTest, TestUidGoneForeground) {
+ ALOGD("TestUidGoneForeground starting...");
+
+ dismissKeyguard();
+ stopAppPackages();
+
+ registerMultipleClients();
+
+ const char* dstPath0 = prepareOutputFile(OUTPATH(TestUidGoneForegroundSession0));
+ const char* dstPath1 = prepareOutputFile(OUTPATH(TestUidGoneForegroundSession1));
+
+ // Test kill foreground app, using only 1 uid.
+ ALOGD("Moving app A to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+
+ // Submit sessions to Client1 (app A).
+ ALOGD("Submitting sessions to client1 (app A) ...");
+ EXPECT_TRUE(mClient1->submit(0, kLongSrcPath, dstPath0, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_TRUE(mClient1->submit(1, kLongSrcPath, dstPath1, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::NoEvent);
+
+ // Kill app A, expect both of A's sessions to be cancelled with error code
+ // kUidGoneCancelled.
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Failed(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Failed(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+
+ unregisterMultipleClients();
+
+ stopAppPackages();
+
+ ALOGD("TestUidGoneForeground finished.");
+}
+
+TEST_F(MediaTranscodingServiceRealTest, TestUidGoneForegroundMultiUids) {
+ ALOGD("TestUidGoneForegroundMultiUids starting...");
+
+ dismissKeyguard();
+ stopAppPackages();
+
+ registerMultipleClients();
+
+ const char* dstPath0 = prepareOutputFile(OUTPATH(TestUidGoneForegroundMultiUidsSession0));
+ const char* dstPath1 = prepareOutputFile(OUTPATH(TestUidGoneForegroundMultiUidsSession1));
+
+ // Test kill foreground app, using two uids.
+ ALOGD("Moving app B to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageB, kTestActivityName));
+ EXPECT_TRUE(mClient2->submit(0, kLongSrcPath, dstPath0, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 0));
+ EXPECT_TRUE(mClient2->submit(1, kLongSrcPath, dstPath1, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::NoEvent);
+ // Have app A also request session 1.
+ EXPECT_TRUE(mClient2->addClientUid(1, mClient1->mClientUid));
+
+ ALOGD("Moving app A to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Pause(CLIENT(2), 0));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 1));
+
+ // Kill app A, CLIENT(2)'s session 1 should continue because it's also requested by app B.
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::NoEvent);
+
+ // Kill app B, sessions should be cancelled.
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Failed(CLIENT(2), 0));
+ EXPECT_EQ(mClient2->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Failed(CLIENT(2), 1));
+ EXPECT_EQ(mClient2->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+
+ unregisterMultipleClients();
+
+ stopAppPackages();
+
+ ALOGD("TestUidGoneForegroundMultiUids finished.");
+}
+
+TEST_F(MediaTranscodingServiceRealTest, TestUidGoneBackground) {
+ ALOGD("TestUidGoneBackground starting...");
+
+ dismissKeyguard();
+ stopAppPackages();
+
+ registerMultipleClients();
+
+ const char* dstPath0 = prepareOutputFile(OUTPATH(TestUidGoneBackgroundSession0));
+ const char* dstPath1 = prepareOutputFile(OUTPATH(TestUidGoneBackgroundSession1));
+
+ // Test kill background app; each session is owned by a single client uid.
+ ALOGD("Moving app B to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageB, kTestActivityName));
+ EXPECT_TRUE(mClient2->submit(0, kLongSrcPath, dstPath0, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 0));
+ EXPECT_TRUE(mClient2->submit(1, kLongSrcPath, dstPath1, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::NoEvent);
+
+ ALOGD("Moving app A to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+ EXPECT_TRUE(mClient1->submit(0, kLongSrcPath, dstPath0, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Pause(CLIENT(2), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+
+ // Kill app B, all its sessions should be cancelled.
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Failed(CLIENT(2), 0));
+ EXPECT_EQ(mClient2->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Failed(CLIENT(2), 1));
+ EXPECT_EQ(mClient2->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+
+ unregisterMultipleClients();
+
+ stopAppPackages();
+
+ ALOGD("TestUidGoneBackground finished.");
+}
+
+TEST_F(MediaTranscodingServiceRealTest, TestUidGoneBackgroundMultiUids) {
+ ALOGD("TestUidGoneBackgroundMultiUids starting...");
+
+ dismissKeyguard();
+ stopAppPackages();
+
+ registerMultipleClients();
+
+ const char* dstPath0 = prepareOutputFile(OUTPATH(TestUidGoneBackgroundMultiUidsSession0));
+ const char* dstPath1 = prepareOutputFile(OUTPATH(TestUidGoneBackgroundMultiUidsSession1));
+
+ // Test kill background app, using two uids.
+ ALOGD("Moving app B to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageB, kTestActivityName));
+ EXPECT_TRUE(mClient2->submit(0, kLongSrcPath, dstPath0, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 0));
+ EXPECT_TRUE(mClient2->submit(1, kLongSrcPath, dstPath1, TranscodingSessionPriority::kNormal,
+ kBitRate));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::NoEvent);
+ // Have app A also request session 1.
+ EXPECT_TRUE(mClient2->addClientUid(1, mClient1->mClientUid));
+
+ ALOGD("Moving app A to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Pause(CLIENT(2), 0));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 1));
+
+ // Kill app B, CLIENT(2)'s session 1 should continue to run, session 0 on
+ // the other hand should be cancelled.
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Failed(CLIENT(2), 0));
+ EXPECT_EQ(mClient2->getLastError(), TranscodingErrorCode::kUidGoneCancelled);
+
+ unregisterMultipleClients();
+
+ stopAppPackages();
+
+ ALOGD("TestUidGoneBackgroundMultiUids finished.");
+}
+
} // namespace media
} // namespace android
diff --git a/services/mediatranscoding/tests/mediatranscodingservice_simulated_tests.cpp b/services/mediatranscoding/tests/mediatranscodingservice_simulated_tests.cpp
index c8994ac..cb354f4 100644
--- a/services/mediatranscoding/tests/mediatranscodingservice_simulated_tests.cpp
+++ b/services/mediatranscoding/tests/mediatranscodingservice_simulated_tests.cpp
@@ -36,6 +36,7 @@
#include <iostream>
#include <list>
+#include <unordered_set>
#include "MediaTranscodingServiceTestHelper.h"
#include "SimulatedTranscoder.h"
@@ -255,6 +256,54 @@
unregisterMultipleClients();
}
+TEST_F(MediaTranscodingServiceSimulatedTest, TestAddGetClientUids) {
+ registerMultipleClients();
+
+ std::vector<int32_t> clientUids;
+ TranscodingRequestParcel request;
+ TranscodingSessionParcel session;
+ uid_t ownUid = ::getuid();
+
+ // Submit one real-time session.
+ EXPECT_TRUE(mClient1->submit(0, "test_source_file_0", "test_destination_file"));
+
+ // Should have mClientUid in client uid list.
+ EXPECT_TRUE(mClient1->getClientUids(0, &clientUids));
+ EXPECT_EQ(clientUids.size(), 1u);
+ EXPECT_EQ(clientUids[0], (int32_t)mClient1->mClientUid);
+
+ // Adding invalid client uid should fail.
+ EXPECT_TRUE(mClient1->addClientUid<fail>(0, kInvalidClientUid));
+
+ // Adding mClientUid again should fail.
+ EXPECT_TRUE(mClient1->addClientUid<fail>(0, mClient1->mClientUid));
+
+ // Submit one offline session.
+ EXPECT_TRUE(mClient1->submit(1, "test_source_file_1", "test_destination_file_1",
+ TranscodingSessionPriority::kUnspecified));
+
+ // Should not have any uids in client uid list.
+ EXPECT_TRUE(mClient1->getClientUids(1, &clientUids));
+ EXPECT_EQ(clientUids.size(), 0u);
+
+ // Add own uid (with IMediaTranscodingService::USE_CALLING_UID), should succeed.
+ EXPECT_TRUE(mClient1->addClientUid(1, IMediaTranscodingService::USE_CALLING_UID));
+ EXPECT_TRUE(mClient1->getClientUids(1, &clientUids));
+ EXPECT_EQ(clientUids.size(), 1u);
+ EXPECT_EQ(clientUids[0], (int32_t)ownUid);
+
+ // Adding mClientUid should succeed.
+ EXPECT_TRUE(mClient1->addClientUid(1, mClient1->mClientUid));
+ EXPECT_TRUE(mClient1->getClientUids(1, &clientUids));
+ std::unordered_set<uid_t> uidSet;
+ uidSet.insert(clientUids.begin(), clientUids.end());
+ EXPECT_EQ(uidSet.size(), 2u);
+ EXPECT_EQ(uidSet.count(ownUid), 1u);
+ EXPECT_EQ(uidSet.count(mClient1->mClientUid), 1u);
+
+ unregisterMultipleClients();
+}
+
TEST_F(MediaTranscodingServiceSimulatedTest, TestSubmitCancelWithOfflineSessions) {
registerMultipleClients();
@@ -378,6 +427,53 @@
ALOGD("TestTranscodingUidPolicy finished.");
}
+TEST_F(MediaTranscodingServiceSimulatedTest, TestTranscodingUidPolicyWithMultipleClientUids) {
+ ALOGD("TestTranscodingUidPolicyWithMultipleClientUids starting...");
+
+ EXPECT_TRUE(ShellHelper::RunCmd("input keyevent KEYCODE_WAKEUP"));
+ EXPECT_TRUE(ShellHelper::RunCmd("wm dismiss-keyguard"));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+
+ registerMultipleClients();
+
+ ALOGD("Moving app A to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+
+ // Submit 3 requests.
+ ALOGD("Submitting session to client1 (app A)...");
+ EXPECT_TRUE(mClient1->submit(0, "test_source_file_0", "test_destination_file_0"));
+ EXPECT_TRUE(mClient1->submit(1, "test_source_file_1", "test_destination_file_1"));
+ EXPECT_TRUE(mClient1->submit(2, "test_source_file_2", "test_destination_file_2"));
+
+ // mClient1's Session 0 should start immediately.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+
+ // Add client2 (app B)'s uid to mClient1's session 1.
+ EXPECT_TRUE(mClient1->addClientUid(1, mClient2->mClientUid));
+
+ ALOGD("Moving app B to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageB, kTestActivityName));
+
+ // mClient1's session 0 should pause, session 1 should start.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Pause(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
+
+ ALOGD("Moving app A back to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Resume(CLIENT(1), 0));
+
+ unregisterMultipleClients();
+
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+
+ ALOGD("TestTranscodingUidPolicyWithMultipleClientUids finished.");
+}
+
TEST_F(MediaTranscodingServiceSimulatedTest, TestTranscodingThermalPolicy) {
ALOGD("TestTranscodingThermalPolicy starting...");
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index faea58f..13dd3d3 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -38,6 +38,10 @@
using namespace android; // TODO just import names needed
using namespace aaudio; // TODO just import names needed
+AAudioServiceEndpoint::~AAudioServiceEndpoint() {
+ ALOGD("%s() called", __func__);
+}
+
std::string AAudioServiceEndpoint::dump() const NO_THREAD_SAFETY_ANALYSIS {
std::stringstream result;
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index 72090c2..a7f63d3 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -43,7 +43,7 @@
, public AAudioStreamParameters {
public:
- virtual ~AAudioServiceEndpoint() = default;
+ virtual ~AAudioServiceEndpoint();
virtual std::string dump() const;
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 556710d..7294a58 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -51,8 +51,6 @@
: mMmapStream(nullptr)
, mAAudioService(audioService) {}
-AAudioServiceEndpointMMAP::~AAudioServiceEndpointMMAP() {}
-
std::string AAudioServiceEndpointMMAP::dump() const {
std::stringstream result;
@@ -357,7 +355,10 @@
// This is called by AudioFlinger when it wants to destroy a stream.
void AAudioServiceEndpointMMAP::onTearDown(audio_port_handle_t portHandle) {
ALOGD("%s(portHandle = %d) called", __func__, portHandle);
- std::thread asyncTask(&AAudioServiceEndpointMMAP::handleTearDownAsync, this, portHandle);
+ android::sp<AAudioServiceEndpointMMAP> holdEndpoint(this);
+ std::thread asyncTask([holdEndpoint, portHandle]() {
+ holdEndpoint->handleTearDownAsync(portHandle);
+ });
asyncTask.detach();
}
@@ -378,9 +379,11 @@
ALOGD("%s() called with dev %d, old = %d", __func__, deviceId, getDeviceId());
if (getDeviceId() != deviceId) {
if (getDeviceId() != AUDIO_PORT_HANDLE_NONE) {
- std::thread asyncTask([this, deviceId]() {
- disconnectRegisteredStreams();
- setDeviceId(deviceId);
+ android::sp<AAudioServiceEndpointMMAP> holdEndpoint(this);
+ std::thread asyncTask([holdEndpoint, deviceId]() {
+ ALOGD("onRoutingChanged() asyncTask launched");
+ holdEndpoint->disconnectRegisteredStreams();
+ holdEndpoint->setDeviceId(deviceId);
});
asyncTask.detach();
} else {
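
Both async tasks above now capture a strong pointer so the endpoint cannot be destroyed while a detached thread is still using it. A minimal sketch of that pattern with a generic RefBase class (names are illustrative, not from this patch):

    #include <thread>

    #include <utils/RefBase.h>
    #include <utils/StrongPointer.h>

    class Worker : public android::RefBase {
    public:
        void onEvent() {
            // Capture a strong reference so the object cannot be destroyed
            // while the detached task is still running.
            android::sp<Worker> holdWorker(this);
            std::thread asyncTask([holdWorker]() {
                holdWorker->handleEventAsync();
            });
            asyncTask.detach();
        }

    private:
        void handleEventAsync() { /* long-running work */ }
    };
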
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
index 24b161d..5a53885 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.h
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -44,7 +44,7 @@
public:
explicit AAudioServiceEndpointMMAP(android::AAudioService &audioService);
- virtual ~AAudioServiceEndpointMMAP();
+ virtual ~AAudioServiceEndpointMMAP() = default;
std::string dump() const override;
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
index 501e8c0..0d453cf 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.cpp
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -111,7 +111,7 @@
if (!endpoint->isConnected()) {
ALOGD("%s() call safeReleaseCloseFromCallback()", __func__);
// Release and close under a lock with no check for callback collisions.
- endpoint->getStreamInternal()->safeReleaseCloseFromCallback();
+ endpoint->getStreamInternal()->safeReleaseCloseInternal();
}
return result;
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 694094c..dbacd75 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -67,8 +67,7 @@
// If the stream is deleted when OPEN or in use then audio resources will leak.
// This would indicate an internal error. So we want to find this ASAP.
LOG_ALWAYS_FATAL_IF(!(getState() == AAUDIO_STREAM_STATE_CLOSED
- || getState() == AAUDIO_STREAM_STATE_UNINITIALIZED
- || getState() == AAUDIO_STREAM_STATE_DISCONNECTED),
+ || getState() == AAUDIO_STREAM_STATE_UNINITIALIZED),
"service stream %p still open, state = %d",
this, getState());
}
@@ -229,7 +228,7 @@
aaudio_result_t result = AAUDIO_OK;
if (auto state = getState();
- state == AAUDIO_STREAM_STATE_CLOSED || state == AAUDIO_STREAM_STATE_DISCONNECTED) {
+ state == AAUDIO_STREAM_STATE_CLOSED || isDisconnected_l()) {
ALOGW("%s() already CLOSED, returns INVALID_STATE, handle = %d",
__func__, getHandle());
return AAUDIO_ERROR_INVALID_STATE;
@@ -261,8 +260,14 @@
sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
setState(AAUDIO_STREAM_STATE_STARTED);
mThreadEnabled.store(true);
+ // Make sure this object does not get deleted before the run() method
+ // can protect it by making a strong pointer.
+ incStrong(nullptr); // See run() method.
result = mTimestampThread.start(this);
- if (result != AAUDIO_OK) goto error;
+ if (result != AAUDIO_OK) {
+ decStrong(nullptr); // run() can't do it so we have to do it here.
+ goto error;
+ }
return result;
@@ -291,10 +296,6 @@
.set(AMEDIAMETRICS_PROP_STATUS, (int32_t)result)
.record(); });
- // Send it now because the timestamp gets rounded up when stopStream() is called below.
- // Also we don't need the timestamps while we are shutting down.
- sendCurrentTimestamp();
-
result = stopTimestampThread();
if (result != AAUDIO_OK) {
disconnect_l();
@@ -340,10 +341,12 @@
setState(AAUDIO_STREAM_STATE_STOPPING);
- // Send it now because the timestamp gets rounded up when stopStream() is called below.
- // Also we don't need the timestamps while we are shutting down.
- sendCurrentTimestamp(); // warning - this calls a virtual function
+ // Temporarily unlock because we are joining the timestamp thread and it may try
+ // to acquire mLock.
+ mLock.unlock();
result = stopTimestampThread();
+ mLock.lock();
+
if (result != AAUDIO_OK) {
disconnect_l();
return result;
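
The temporary unlock above avoids a deadlock: the timestamp thread being joined may itself need mLock before it can exit. A standalone sketch of the same hazard and fix, with illustrative names:

    #include <mutex>
    #include <thread>

    class Stopper {
    public:
        void stop() {
            std::unique_lock<std::mutex> lock(mLock);
            mEnabled = false;  // The worker polls this under mLock.
            // The worker thread may need mLock before it can exit, so drop the
            // lock while joining and take it back afterwards.
            lock.unlock();
            if (mWorker.joinable()) mWorker.join();
            lock.lock();
            // ... continue tearing down state under mLock ...
        }

    private:
        std::mutex mLock;
        bool mEnabled = true;
        std::thread mWorker;
    };
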
@@ -403,15 +406,21 @@
__attribute__((no_sanitize("integer")))
void AAudioServiceStreamBase::run() {
ALOGD("%s() %s entering >>>>>>>>>>>>>> TIMESTAMPS", __func__, getTypeText());
+ // Hold onto the ref counted stream until the end.
+ android::sp<AAudioServiceStreamBase> holdStream(this);
TimestampScheduler timestampScheduler;
+ // Balance the incStrong from when the thread was launched.
+ holdStream->decStrong(nullptr);
+
timestampScheduler.setBurstPeriod(mFramesPerBurst, getSampleRate());
timestampScheduler.start(AudioClock::getNanoseconds());
int64_t nextTime = timestampScheduler.nextAbsoluteTime();
int32_t loopCount = 0;
+ aaudio_result_t result = AAUDIO_OK;
while(mThreadEnabled.load()) {
loopCount++;
if (AudioClock::getNanoseconds() >= nextTime) {
- aaudio_result_t result = sendCurrentTimestamp();
+ result = sendCurrentTimestamp();
if (result != AAUDIO_OK) {
ALOGE("%s() timestamp thread got result = %d", __func__, result);
break;
@@ -423,6 +432,11 @@
AudioClock::sleepUntilNanoTime(nextTime);
}
}
+ // This call was moved out of stop_l() and pause_l(); calling it there could
+ // deadlock if it resulted in a call to disconnect.
+ if (result == AAUDIO_OK) {
+ (void) sendCurrentTimestamp();
+ }
ALOGD("%s() %s exiting after %d loops <<<<<<<<<<<<<< TIMESTAMPS",
__func__, getTypeText(), loopCount);
}
@@ -433,8 +447,7 @@
}
void AAudioServiceStreamBase::disconnect_l() {
- if (getState() != AAUDIO_STREAM_STATE_DISCONNECTED
- && getState() != AAUDIO_STREAM_STATE_CLOSED) {
+ if (!isDisconnected_l() && getState() != AAUDIO_STREAM_STATE_CLOSED) {
mediametrics::LogItem(mMetricsId)
.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_DISCONNECT)
@@ -442,7 +455,7 @@
.record();
sendServiceEvent(AAUDIO_SERVICE_EVENT_DISCONNECTED);
- setState(AAUDIO_STREAM_STATE_DISCONNECTED);
+ setDisconnected_l(true);
}
}
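
The incStrong()/decStrong() pair added around the timestamp-thread start hands a reference across the thread boundary: the launcher bumps the strong count, and run() adopts it into an sp<> and drops the extra count (or the launcher drops it if the thread never starts). A simplified sketch using std::thread in place of the service's thread wrapper; names are illustrative:

    #include <thread>

    #include <utils/RefBase.h>

    class Service : public android::RefBase {
    public:
        void startTimestampThread() {
            // Keep the object alive until run() can wrap it in a strong pointer.
            incStrong(nullptr);
            std::thread t(&Service::run, this);
            t.detach();
            // If thread creation can fail, the launcher must call decStrong(nullptr)
            // itself, as the error path in the patch above does.
        }

    private:
        void run() {
            // Adopt the reference taken by the launcher, then balance the count.
            android::sp<Service> holdSelf(this);
            decStrong(nullptr);
            // ... periodic work; holdSelf keeps the object alive until return ...
        }
    };
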
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 06c9f21..c42df0f 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -80,7 +80,7 @@
// because we had to wait until we generated the handle.
void logOpen(aaudio_handle_t streamHandle);
- aaudio_result_t close();
+ aaudio_result_t close() EXCLUDES(mLock);
/**
* Start the flow of audio data.
@@ -88,7 +88,7 @@
* This is not guaranteed to be synchronous but it currently is.
* An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
*/
- aaudio_result_t start();
+ aaudio_result_t start() EXCLUDES(mLock);
/**
* Stop the flow of data so that start() can resume without loss of data.
@@ -96,7 +96,7 @@
* This is not guaranteed to be synchronous but it currently is.
* An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
*/
- aaudio_result_t pause();
+ aaudio_result_t pause() EXCLUDES(mLock);
/**
* Stop the flow of data after the currently queued data has finished playing.
@@ -105,14 +105,14 @@
* An AAUDIO_SERVICE_EVENT_STOPPED will be sent to the client when complete.
*
*/
- aaudio_result_t stop();
+ aaudio_result_t stop() EXCLUDES(mLock);
/**
* Discard any data held by the underlying HAL or Service.
*
* An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
*/
- aaudio_result_t flush();
+ aaudio_result_t flush() EXCLUDES(mLock);
virtual aaudio_result_t startClient(const android::AudioClient& client,
const audio_attributes_t *attr __unused,
@@ -126,9 +126,9 @@
return AAUDIO_ERROR_UNAVAILABLE;
}
- aaudio_result_t registerAudioThread(pid_t clientThreadId, int priority);
+ aaudio_result_t registerAudioThread(pid_t clientThreadId, int priority) EXCLUDES(mLock);
- aaudio_result_t unregisterAudioThread(pid_t clientThreadId);
+ aaudio_result_t unregisterAudioThread(pid_t clientThreadId) EXCLUDES(mLock);
bool isRunning() const {
return mState == AAUDIO_STREAM_STATE_STARTED;
@@ -137,7 +137,7 @@
/**
* Fill in a parcelable description of stream.
*/
- aaudio_result_t getDescription(AudioEndpointParcelable &parcelable);
+ aaudio_result_t getDescription(AudioEndpointParcelable &parcelable) EXCLUDES(mLock);
void setRegisteredThread(pid_t pid) {
mRegisteredClientThread = pid;
@@ -153,7 +153,7 @@
void run() override; // to implement Runnable
- void disconnect();
+ void disconnect() EXCLUDES(mLock);
const android::AudioClient &getAudioClient() {
return mMmapClient;
@@ -248,7 +248,7 @@
aaudio_result_t writeUpMessageQueue(AAudioServiceMessage *command);
- aaudio_result_t sendCurrentTimestamp();
+ aaudio_result_t sendCurrentTimestamp() EXCLUDES(mLock);
aaudio_result_t sendXRunCount(int32_t xRunCount);
@@ -265,6 +265,13 @@
aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+ bool isDisconnected_l() const REQUIRES(mLock) {
+ return mDisconnected;
+ }
+ void setDisconnected_l(bool flag) REQUIRES(mLock) {
+ mDisconnected = flag;
+ }
+
pid_t mRegisteredClientThread = ILLEGAL_THREAD_ID;
std::mutex mUpMessageQueueLock;
@@ -322,6 +329,8 @@
// for example a full message queue. Note that this atomic is unrelated to mCloseNeeded.
std::atomic<bool> mSuspended{false};
+ bool mDisconnected GUARDED_BY(mLock) {false};
+
protected:
// Locking order is important.
// Acquire mLock before acquiring AAudioServiceEndpoint::mLockStreams
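
The EXCLUDES/REQUIRES/GUARDED_BY annotations introduced in this header drive Clang's thread-safety analysis. A minimal sketch of the convention, assuming the macros come from <android-base/thread_annotations.h> (an assumption about the build setup, not stated in the patch):

    #include <mutex>

    #include <android-base/thread_annotations.h>

    class Counter {
    public:
        // Public entry point: must be called without mLock held.
        void increment() EXCLUDES(mLock) {
            std::lock_guard<std::mutex> guard(mLock);
            increment_l();
        }

    private:
        // "_l" suffix: the caller must already hold mLock.
        void increment_l() REQUIRES(mLock) { mValue++; }

        std::mutex mLock;
        int mValue GUARDED_BY(mLock) = 0;
    };
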
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.h b/services/oboeservice/AAudioServiceStreamMMAP.h
index 6ba1725..667465a 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.h
+++ b/services/oboeservice/AAudioServiceStreamMMAP.h
@@ -73,7 +73,8 @@
aaudio_result_t getAudioDataDescription(AudioEndpointParcelable &parcelable) override;
- aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
+ aaudio_result_t getFreeRunningPosition(int64_t *positionFrames,
+ int64_t *timeNanos) EXCLUDES(mLock) override;
aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
diff --git a/services/tuner/TunerDescrambler.cpp b/services/tuner/TunerDescrambler.cpp
index 16338db..b7ae167 100644
--- a/services/tuner/TunerDescrambler.cpp
+++ b/services/tuner/TunerDescrambler.cpp
@@ -67,8 +67,9 @@
return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mDescrambler->addPid(getHidlDemuxPid(pid),
- static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter());
+ sp<IFilter> halFilter = (optionalSourceFilter == NULL)
+ ? NULL : static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter();
+ Result res = mDescrambler->addPid(getHidlDemuxPid(pid), halFilter);
if (res != Result::SUCCESS) {
return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
}
@@ -82,8 +83,9 @@
return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
}
- Result res = mDescrambler->removePid(getHidlDemuxPid(pid),
- static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter());
+ sp<IFilter> halFilter = (optionalSourceFilter == NULL)
+ ? NULL : static_cast<TunerFilter*>(optionalSourceFilter.get())->getHalFilter();
+ Result res = mDescrambler->removePid(getHidlDemuxPid(pid), halFilter);
if (res != Result::SUCCESS) {
return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
}
@@ -109,11 +111,11 @@
DemuxPid hidlPid;
switch (pid.getTag()) {
case TunerDemuxPid::tPid: {
- hidlPid.tPid((uint16_t)pid.tPid);
+ hidlPid.tPid((uint16_t)pid.get<TunerDemuxPid::tPid>());
break;
}
case TunerDemuxPid::mmtpPid: {
- hidlPid.mmtpPid((uint16_t)pid.mmtpPid);
+ hidlPid.mmtpPid((uint16_t)pid.get<TunerDemuxPid::mmtpPid>());
break;
}
}
diff --git a/services/tuner/TunerFilter.cpp b/services/tuner/TunerFilter.cpp
index 39a6723..e957b83 100644
--- a/services/tuner/TunerFilter.cpp
+++ b/services/tuner/TunerFilter.cpp
@@ -57,10 +57,10 @@
return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
}
- MQDesc dvrMQDesc;
+ MQDesc filterMQDesc;
Result res;
mFilter->getQueueDesc([&](Result r, const MQDesc& desc) {
- dvrMQDesc = desc;
+ filterMQDesc = desc;
res = r;
});
if (res != Result::SUCCESS) {
@@ -69,7 +69,7 @@
AidlMQDesc aidlMQDesc;
unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(
- dvrMQDesc, &aidlMQDesc);
+ filterMQDesc, &aidlMQDesc);
*_aidl_return = move(aidlMQDesc);
return Status::ok();
}
@@ -471,7 +471,7 @@
res = r;
if (res == Result::SUCCESS) {
TunerFilterSharedHandleInfo info{
- .handle = dupToAidl(hidl_handle(avMemory.getNativeHandle())),
+ .handle = dupToAidl(avMemory),
.size = static_cast<int64_t>(avMemSize),
};
*_aidl_return = move(info);
@@ -480,7 +480,10 @@
}
});
- return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ if (res != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return Status::ok();
}
Status TunerFilter::releaseAvHandle(
@@ -497,7 +500,6 @@
return Status::ok();
}
-
Status TunerFilter::start() {
if (mFilter == nullptr) {
ALOGE("IFilter is not initialized");
diff --git a/services/tuner/TunerLnb.cpp b/services/tuner/TunerLnb.cpp
index 4a5acf5..77248d4 100644
--- a/services/tuner/TunerLnb.cpp
+++ b/services/tuner/TunerLnb.cpp
@@ -48,7 +48,10 @@
sp<ILnbCallback> lnbCallback = new LnbCallback(tunerLnbCallback);
Result status = mLnb->setCallback(lnbCallback);
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ if (status != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return Status::ok();
}
Status TunerLnb::setVoltage(int voltage) {
@@ -58,7 +61,10 @@
}
Result status = mLnb->setVoltage(static_cast<LnbVoltage>(voltage));
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ if (status != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return Status::ok();
}
Status TunerLnb::setTone(int tone) {
@@ -68,7 +74,10 @@
}
Result status = mLnb->setTone(static_cast<LnbTone>(tone));
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ if (status != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return Status::ok();
}
Status TunerLnb::setSatellitePosition(int position) {
@@ -78,7 +87,10 @@
}
Result status = mLnb->setSatellitePosition(static_cast<LnbPosition>(position));
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ if (status != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return Status::ok();
}
Status TunerLnb::sendDiseqcMessage(const vector<uint8_t>& diseqcMessage) {
@@ -88,7 +100,10 @@
}
Result status = mLnb->sendDiseqcMessage(diseqcMessage);
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ if (status != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return Status::ok();
}
Status TunerLnb::close() {
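
The Result-to-Status conversion above now repeats across the Tuner wrappers; a small helper such as the sketch below could centralize it. Illustrative only, not part of the patch; it assumes the Status alias for ::ndk::ScopedAStatus and the HIDL Result enum already visible in these files:

    // Map a HIDL Result to a binder Status, treating SUCCESS as ok() and
    // anything else as a service-specific error.
    static Status resultToStatus(Result res) {
        if (res != Result::SUCCESS) {
            return Status::fromServiceSpecificError(static_cast<int32_t>(res));
        }
        return Status::ok();
    }

    // e.g. TunerLnb::setTone() could then end with:
    //     return resultToStatus(mLnb->setTone(static_cast<LnbTone>(tone)));
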
diff --git a/services/tuner/TunerTimeFilter.cpp b/services/tuner/TunerTimeFilter.cpp
index 25e1ad9..ea9da30 100644
--- a/services/tuner/TunerTimeFilter.cpp
+++ b/services/tuner/TunerTimeFilter.cpp
@@ -38,7 +38,10 @@
}
Result status = mTimeFilter->setTimeStamp(timeStamp);
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ if (status != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return Status::ok();
}
Status TunerTimeFilter::clearTimeStamp() {
@@ -48,7 +51,10 @@
}
Result status = mTimeFilter->clearTimeStamp();
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ if (status != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+ return Status::ok();
}
Status TunerTimeFilter::getSourceTime(int64_t* _aidl_return) {
@@ -66,8 +72,9 @@
});
if (status != Result::SUCCESS) {
*_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
}
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ return Status::ok();
}
Status TunerTimeFilter::getTimeStamp(int64_t* _aidl_return) {
@@ -85,8 +92,9 @@
});
if (status != Result::SUCCESS) {
*_aidl_return = (int64_t)Constant64Bit::INVALID_PRESENTATION_TIME_STAMP;
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
}
- return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ return Status::ok();
}
Status TunerTimeFilter::close() {