Merge changes I630138a1,I545e9890,I20223fa1,I9e966951,I9fe167dd
* changes:
codec2: add util method to fill Traits from C2ComponentInterface
CCodec: handle color-transfer-request
codec2: integrate filter plugin with codec2 hidl utils
codec2: add filter plugin interface & wrapper implementation
codec2: allow multiple components to get block pool from ID
diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp
index aecb70a..0b0f584 100644
--- a/camera/CameraBase.cpp
+++ b/camera/CameraBase.cpp
@@ -29,6 +29,7 @@
#include <binder/IMemory.h>
#include <camera/CameraBase.h>
+#include <camera/CameraUtils.h>
// needed to instantiate
#include <camera/Camera.h>
@@ -124,9 +125,7 @@
{
Mutex::Autolock _l(gLock);
if (gCameraService.get() == 0) {
- char value[PROPERTY_VALUE_MAX];
- property_get("config.disable_cameraservice", value, "0");
- if (strncmp(value, "0", 2) != 0 && strncasecmp(value, "false", 6) != 0) {
+ if (CameraUtils::isCameraServiceDisabled()) {
return gCameraService;
}
diff --git a/camera/CameraUtils.cpp b/camera/CameraUtils.cpp
index 67fc116..f9b1b37 100644
--- a/camera/CameraUtils.cpp
+++ b/camera/CameraUtils.cpp
@@ -23,6 +23,7 @@
#include <system/window.h>
#include <system/graphics.h>
+#include <cutils/properties.h>
#include <utils/Log.h>
namespace android {
@@ -122,4 +123,10 @@
return OK;
}
+bool CameraUtils::isCameraServiceDisabled() {
+ char value[PROPERTY_VALUE_MAX];
+ property_get("config.disable_cameraservice", value, "0");
+ return (strncmp(value, "0", 2) != 0 && strncasecmp(value, "false", 6) != 0);
+}
+
} /* namespace android */
diff --git a/camera/include/camera/CameraUtils.h b/camera/include/camera/CameraUtils.h
index f596f80..a397ccd 100644
--- a/camera/include/camera/CameraUtils.h
+++ b/camera/include/camera/CameraUtils.h
@@ -47,6 +47,11 @@
*/
static bool isNativeHandleMetadata(const sp<IMemory>& imageData);
+ /**
+ * Check if camera service is disabled on this device
+ */
+ static bool isCameraServiceDisabled();
+
private:
CameraUtils();
};
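A minimal usage sketch of the shared helper added above, mirroring the call sites patched in CameraBase.cpp and ACameraManager.cpp. shouldConnectToCameraService() is a hypothetical wrapper for illustration only and is not part of this change; it assumes the patched CameraUtils.h is on the include path.

    #include <camera/CameraUtils.h>

    // Hypothetical helper: callers skip binding to the camera service when the
    // config.disable_cameraservice property is set to "1"/"true".
    static bool shouldConnectToCameraService() {
        return !android::CameraUtils::isCameraServiceDisabled();
    }
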
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index 419250c..73cabbf 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -24,6 +24,7 @@
#include <utils/Vector.h>
#include <cutils/properties.h>
#include <stdlib.h>
+#include <camera/CameraUtils.h>
#include <camera/VendorTagDescriptor.h>
using namespace android::acam;
@@ -70,12 +71,6 @@
mCameraService.clear();
}
-static bool isCameraServiceDisabled() {
- char value[PROPERTY_VALUE_MAX];
- property_get("config.disable_cameraservice", value, "0");
- return (strncmp(value, "0", 2) != 0 && strncasecmp(value, "false", 6) != 0);
-}
-
sp<hardware::ICameraService> CameraManagerGlobal::getCameraService() {
Mutex::Autolock _l(mLock);
return getCameraServiceLocked();
@@ -83,7 +78,7 @@
sp<hardware::ICameraService> CameraManagerGlobal::getCameraServiceLocked() {
if (mCameraService.get() == nullptr) {
- if (isCameraServiceDisabled()) {
+ if (CameraUtils::isCameraServiceDisabled()) {
return mCameraService;
}
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.cpp b/media/codec2/components/hevc/C2SoftHevcDec.cpp
index 56dd26b..a374dfa 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcDec.cpp
@@ -906,7 +906,6 @@
work->result = C2_CORRUPTED;
return;
}
- continue;
}
if (0 < s_decode_op.u4_pic_wd && 0 < s_decode_op.u4_pic_ht) {
if (mHeaderDecoded == false) {
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index c95d325..6cf0058 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -492,7 +492,10 @@
// We used to not report changes to these keys to the client.
const static std::set<std::string> sIgnoredKeys({
KEY_BIT_RATE,
+ KEY_FRAME_RATE,
KEY_MAX_BIT_RATE,
+ KEY_MAX_WIDTH,
+ KEY_MAX_HEIGHT,
"csd-0",
"csd-1",
"csd-2",
@@ -1226,6 +1229,8 @@
Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
const std::unique_ptr<Config> &config = *configLocked;
+ config->queryConfiguration(comp);
+
mCallback->onComponentConfigured(config->mInputFormat, config->mOutputFormat);
}
@@ -1731,7 +1736,9 @@
{
Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
const std::unique_ptr<Config> &config = *configLocked;
+ sp<AMessage> outputFormat = config->mOutputFormat;
config->queryConfiguration(comp);
+ RevertOutputFormatIfNeeded(outputFormat, config->mOutputFormat);
}
(void)mChannel->start(nullptr, nullptr, [&]{
diff --git a/media/libeffects/lvm/benchmarks/Android.bp b/media/libeffects/lvm/benchmarks/Android.bp
index 60a9772..8a25b85 100644
--- a/media/libeffects/lvm/benchmarks/Android.bp
+++ b/media/libeffects/lvm/benchmarks/Android.bp
@@ -28,6 +28,7 @@
cc_benchmark {
name: "reverb_benchmark",
vendor: true,
+ host_supported: true,
include_dirs: [
"frameworks/av/media/libeffects/lvm/wrapper/Reverb",
],
diff --git a/media/libeffects/lvm/benchmarks/lvm_benchmark.cpp b/media/libeffects/lvm/benchmarks/lvm_benchmark.cpp
index e2e4a85..bdb66d8 100644
--- a/media/libeffects/lvm/benchmarks/lvm_benchmark.cpp
+++ b/media/libeffects/lvm/benchmarks/lvm_benchmark.cpp
@@ -53,8 +53,6 @@
constexpr size_t kNumChMasks = std::size(kChMasks);
constexpr int kSampleRate = 44100;
-// TODO(b/131240940) Remove once effects are updated to produce mono output
-constexpr size_t kMinOutputChannelCount = 2;
/*******************************************************************
* A test result running on Pixel 3 for comparison.
@@ -64,6 +62,10 @@
* -----------------------------------------------------
* Benchmark Time CPU Iterations
* -----------------------------------------------------
+ * BM_LVM/1/0 52123 ns 51971 ns 13437
+ * BM_LVM/1/1 75397 ns 75175 ns 9382
+ * BM_LVM/1/2 40253 ns 40140 ns 17418
+ * BM_LVM/1/3 19918 ns 19860 ns 35230
* BM_LVM/2/0 62455 ns 62283 ns 11214
* BM_LVM/2/1 110086 ns 109751 ns 6350
* BM_LVM/2/2 44017 ns 43890 ns 15982
@@ -203,7 +205,7 @@
// Run the test
for (auto _ : state) {
- std::vector<float> output(kFrameCount * std::max(channelCount, kMinOutputChannelCount));
+ std::vector<float> output(kFrameCount * channelCount);
benchmark::DoNotOptimize(input.data());
benchmark::DoNotOptimize(output.data());
@@ -224,8 +226,7 @@
}
static void LVMArgs(benchmark::internal::Benchmark* b) {
- // TODO(b/131240940) Test single channel once effects are updated to process mono data
- for (int i = 2; i <= kNumChMasks; i++) {
+ for (int i = FCC_1; i <= kNumChMasks; i++) {
for (int j = 0; j < kNumEffectUuids; ++j) {
b->Args({i, j});
}
diff --git a/media/libeffects/lvm/lib/Android.bp b/media/libeffects/lvm/lib/Android.bp
index ed7ef7f..a044295 100644
--- a/media/libeffects/lvm/lib/Android.bp
+++ b/media/libeffects/lvm/lib/Android.bp
@@ -140,6 +140,7 @@
},
vendor: true,
+ host_supported: true,
srcs: [
"Reverb/src/LVREV_ApplyNewSettings.cpp",
"Reverb/src/LVREV_ClearAudioBuffers.cpp",
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.cpp b/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.cpp
index d860ad0..3fc9e95 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.cpp
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.cpp
@@ -277,8 +277,8 @@
/*
* Create biquad instance
*/
- pInstance->pHPFBiquad.reset(new android::audio_utils::BiquadFilter<LVM_FLOAT>(
- (FCC_1 == pParams->NrChannels) ? FCC_2 : pParams->NrChannels));
+ pInstance->pHPFBiquad.reset(
+ new android::audio_utils::BiquadFilter<LVM_FLOAT>(pParams->NrChannels));
/*
* Update the filters
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp b/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp
index 979644c..761c6ce 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp
@@ -57,10 +57,7 @@
* Create the instance handle if not already initialised
*/
if (*phInstance == LVM_NULL) {
- *phInstance = calloc(1, sizeof(*pInstance));
- }
- if (*phInstance == LVM_NULL) {
- return LVDBE_NULLADDRESS;
+ *phInstance = new LVDBE_Instance_t;
}
pInstance = (LVDBE_Instance_t*)*phInstance;
@@ -185,6 +182,6 @@
free(pInstance->pData);
pInstance->pData = LVM_NULL;
}
- free(pInstance);
+ delete pInstance;
*phInstance = LVM_NULL;
}
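The calloc()/free() pair for the instance struct is replaced with new/delete here and in the LVM, LVEQNB, LVPSA, LVREV and LVCS init paths below. A plausible motivation, stated as an assumption rather than taken from the patch, is that these instance structs now carry C++ members (for example the BiquadFilter smart pointers touched in LVDBE_Control.cpp above), which need constructors and destructors to run. A minimal sketch of the pattern with illustrative names:

    #include <memory>

    // Illustrative stand-in for an LVM instance struct that now owns C++ members.
    struct Instance_t {
        std::unique_ptr<float[]> scratch;  // constructors/destructors must run
    };

    void CreateInstance(void** phInstance) {
        if (*phInstance == nullptr) {
            *phInstance = new Instance_t;              // runs the default constructor, unlike calloc()
        }
    }

    void DestroyInstance(void** phInstance) {
        delete static_cast<Instance_t*>(*phInstance);  // runs the destructor, unlike free()
        *phInstance = nullptr;
    }
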
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp b/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp
index 8c62e71..0969053 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp
@@ -79,11 +79,7 @@
const LVM_UINT16 NrFrames) // updated to use samples = frames * channels.
{
LVDBE_Instance_t* pInstance = (LVDBE_Instance_t*)hInstance;
-
- /*Extract number of Channels info*/
- // Mono passed in as stereo
- const LVM_INT32 NrChannels =
- pInstance->Params.NrChannels == 1 ? 2 : pInstance->Params.NrChannels;
+ const LVM_INT32 NrChannels = pInstance->Params.NrChannels;
const LVM_INT32 NrSamples = NrChannels * NrFrames;
/* Space to store DBE path computation */
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Buffers.cpp b/media/libeffects/lvm/lib/Bundle/src/LVM_Buffers.cpp
index fbb0fe1..1d913d7 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Buffers.cpp
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Buffers.cpp
@@ -487,10 +487,6 @@
void LVM_BufferUnmanagedOut(LVM_Handle_t hInstance, LVM_UINT16* pNumSamples) {
LVM_Instance_t* pInstance = (LVM_Instance_t*)hInstance;
LVM_INT16 NumChannels = pInstance->NrChannels;
- if (NumChannels == 1) {
- /* Mono input is processed as stereo by LVM module */
- NumChannels = 2;
- }
#undef NrFrames
#define NrFrames (*pNumSamples) // alias for clarity
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.cpp b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.cpp
index c1b375e..b092970 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.cpp
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.cpp
@@ -93,10 +93,7 @@
/*
* Create the instance handle
*/
- *phInstance = (LVM_Handle_t)calloc(1, sizeof(*pInstance));
- if (*phInstance == LVM_NULL) {
- return LVM_NULLADDRESS;
- }
+ *phInstance = new LVM_Instance_t;
pInstance = (LVM_Instance_t*)*phInstance;
pInstance->InstParams = *pInstParams;
@@ -543,7 +540,7 @@
pInstance->pPSAInput = LVM_NULL;
}
- free(*phInstance);
+ delete pInstance;
return;
}
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp
index 82c0e68..20058a1 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.cpp
@@ -111,19 +111,6 @@
}
/*
- * Convert from Mono if necessary
- */
- if (pInstance->Params.SourceFormat == LVM_MONO) {
- MonoTo2I_Float(pInData, /* Source */
- pOutData, /* Destination */
- (LVM_INT16)NumSamples); /* Number of input samples */
- pInput = pOutData;
- pToProcess = pOutData;
- NrChannels = 2;
- ChMask = AUDIO_CHANNEL_OUT_STEREO;
- }
-
- /*
* Process the data with managed buffers
*/
while (SampleCount != 0) {
diff --git a/media/libeffects/lvm/lib/Common/lib/LVM_Types.h b/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
index b95d076..5cdcf35 100644
--- a/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
+++ b/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
@@ -121,6 +121,19 @@
LVM_FS_DUMMY = LVM_MAXENUM
} LVM_Fs_en;
+static inline LVM_Fs_en lvmFsForSampleRate(int sampleRate) {
+ static const std::map<int, LVM_Fs_en> kLVMFsMap = {
+ {8000, LVM_FS_8000}, {11025, LVM_FS_11025}, {12000, LVM_FS_12000},
+ {16000, LVM_FS_16000}, {22050, LVM_FS_22050}, {24000, LVM_FS_24000},
+ {32000, LVM_FS_32000}, {44100, LVM_FS_44100}, {48000, LVM_FS_48000},
+ {88200, LVM_FS_88200}, {96000, LVM_FS_96000}, {176400, LVM_FS_176400},
+ {192000, LVM_FS_192000}};
+ if (kLVMFsMap.find(sampleRate) != kLVMFsMap.end()) {
+ return kLVMFsMap.at(sampleRate);
+ }
+ return LVM_FS_INVALID;
+}
+
/* Memory Types */
typedef enum {
LVM_PERSISTENT_SLOW_DATA = LVM_MEMREGION_PERSISTENT_SLOW_DATA,
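The lvmFsForSampleRate() helper added above centralizes the sample-rate-to-LVM_Fs_en mapping that lvmtest.cpp, EffectBundle.cpp and EffectReverb.cpp each duplicated as a switch (see the hunks further down). A minimal usage sketch, assuming only this header; configureSampleRate() is an illustrative wrapper, not part of the patch:

    #include <cerrno>

    // Illustrative call pattern: map a Hz value, reject unsupported rates.
    int configureSampleRate(int sampleRateHz, LVM_Fs_en* outRate) {
        LVM_Fs_en rate = lvmFsForSampleRate(sampleRateHz);
        if (rate == LVM_FS_INVALID) {
            return -EINVAL;   // matches the error handling at the patched call sites
        }
        *outRate = rate;      // e.g. 44100 -> LVM_FS_44100
        return 0;
    }
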
diff --git a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
index 281d941..18de85b 100644
--- a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
+++ b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
@@ -29,17 +29,18 @@
void Copy_Float(const LVM_FLOAT* src, LVM_FLOAT* dst, LVM_INT16 n);
void Copy_Float_Mc_Stereo(const LVM_FLOAT* src, LVM_FLOAT* dst, LVM_INT16 NrFrames,
LVM_INT32 NrChannels);
-void Copy_Float_Stereo_Mc(const LVM_FLOAT* src, LVM_FLOAT* StereoOut, LVM_FLOAT* dst,
+void Copy_Float_Stereo_Mc(const LVM_FLOAT* src, const LVM_FLOAT* StereoOut, LVM_FLOAT* dst,
LVM_INT16 NrFrames, LVM_INT32 NrChannels);
void Mult3s_Float(const LVM_FLOAT* src, const LVM_FLOAT val, LVM_FLOAT* dst, LVM_INT16 n);
-void DelayMix_Float(const LVM_FLOAT* src, /* Source 1, to be delayed */
- LVM_FLOAT* delay, /* Delay buffer */
- LVM_INT16 size, /* Delay size */
- LVM_FLOAT* dst, /* Source/destination */
- LVM_INT16* pOffset, /* Delay offset */
- LVM_INT16 n); /* Number of stereo samples */
+void DelayMix_Float(const LVM_FLOAT* src, /* Source 1, to be delayed */
+ LVM_FLOAT* delay, /* Delay buffer */
+ LVM_INT16 size, /* Delay size */
+ LVM_FLOAT* dst, /* Source/destination */
+ LVM_INT16* pOffset, /* Delay offset */
+ LVM_INT16 n, /* Number of stereo samples */
+ LVM_INT32 NrChannels); /* Number of channels */
void Add2_Sat_Float(const LVM_FLOAT* src, LVM_FLOAT* dst, LVM_INT16 n);
void Mac3s_Sat_Float(const LVM_FLOAT* src, const LVM_FLOAT val, LVM_FLOAT* dst, LVM_INT16 n);
diff --git a/media/libeffects/lvm/lib/Common/src/Copy_16.cpp b/media/libeffects/lvm/lib/Common/src/Copy_16.cpp
index 7046a94..1fe7470 100644
--- a/media/libeffects/lvm/lib/Common/src/Copy_16.cpp
+++ b/media/libeffects/lvm/lib/Common/src/Copy_16.cpp
@@ -51,25 +51,32 @@
}
// Merge a multichannel source with stereo contained in StereoOut, to dst.
-void Copy_Float_Stereo_Mc(const LVM_FLOAT* src, LVM_FLOAT* StereoOut, LVM_FLOAT* dst,
+void Copy_Float_Stereo_Mc(const LVM_FLOAT* src, const LVM_FLOAT* StereoOut, LVM_FLOAT* dst,
LVM_INT16 NrFrames, /* Number of frames*/
LVM_INT32 NrChannels) {
LVM_INT16 ii, jj;
- // pack dst with stereo information of StereoOut
- // together with the upper channels of src.
- StereoOut += 2 * (NrFrames - 1);
- dst += NrChannels * (NrFrames - 1);
- src += NrChannels * (NrFrames - 1);
- for (ii = NrFrames; ii != 0; ii--) {
- dst[1] = StereoOut[1];
- dst[0] = StereoOut[0]; // copy 1 before 0 is required for NrChannels == 3.
- for (jj = 2; jj < NrChannels; jj++) {
- dst[jj] = src[jj];
+ if (NrChannels >= FCC_2) {
+ // pack dst with stereo information of StereoOut
+ // together with the upper channels of src.
+ StereoOut += 2 * (NrFrames - 1);
+ dst += NrChannels * (NrFrames - 1);
+ src += NrChannels * (NrFrames - 1);
+
+ for (ii = NrFrames; ii != 0; ii--) {
+ dst[1] = StereoOut[1];
+ dst[0] = StereoOut[0]; // copy 1 before 0 is required for NrChannels == 3.
+ for (jj = FCC_2; jj < NrChannels; jj++) {
+ dst[jj] = src[jj];
+ }
+ dst -= NrChannels;
+ src -= NrChannels;
+ StereoOut -= 2;
}
- dst -= NrChannels;
- src -= NrChannels;
- StereoOut -= 2;
+ } else {
+ Copy_Float((const LVM_FLOAT*)StereoOut, /* Source */
+ (LVM_FLOAT*)dst, /* Destination */
+ (LVM_INT16)NrFrames); /* Number of frames */
}
}
/**********************************************************************************/
diff --git a/media/libeffects/lvm/lib/Common/src/DelayMix_16x16.cpp b/media/libeffects/lvm/lib/Common/src/DelayMix_16x16.cpp
index d2537eb..a346636 100644
--- a/media/libeffects/lvm/lib/Common/src/DelayMix_16x16.cpp
+++ b/media/libeffects/lvm/lib/Common/src/DelayMix_16x16.cpp
@@ -26,34 +26,50 @@
LVM_INT16 size, /* Delay size */
LVM_FLOAT* dst, /* Source/destination */
LVM_INT16* pOffset, /* Delay offset */
- LVM_INT16 n) /* Number of stereo samples */
+ LVM_INT16 n, /* Number of samples */
+ LVM_INT32 NrChannels) /* Number of channels */
{
LVM_INT16 i;
LVM_INT16 Offset = *pOffset;
LVM_FLOAT temp;
for (i = 0; i < n; i++) {
- /* Left channel */
- temp = (LVM_FLOAT)((LVM_FLOAT)(*dst + (LVM_FLOAT)delay[Offset]) / 2.0f);
- *dst = temp;
- dst++;
+ if (NrChannels == FCC_1) {
+ temp = (LVM_FLOAT)(*dst + (LVM_FLOAT)delay[Offset]) / 2.0f;
+ *dst = temp;
+ dst++;
- delay[Offset] = *src;
- Offset++;
- src++;
+ delay[Offset] = *src;
+ Offset++;
+ src++;
- /* Right channel */
- temp = (LVM_FLOAT)((LVM_FLOAT)(*dst - (LVM_FLOAT)delay[Offset]) / 2.0f);
- *dst = temp;
- dst++;
+ /* Make the reverb delay buffer a circular buffer */
+ if (Offset >= size) {
+ Offset = 0;
+ }
+ } else {
+ /* Left channel */
+ temp = (LVM_FLOAT)(*dst + (LVM_FLOAT)delay[Offset]) / 2.0f;
+ *dst = temp;
+ dst++;
- delay[Offset] = *src;
- Offset++;
- src++;
+ delay[Offset] = *src;
+ Offset++;
+ src++;
- /* Make the reverb delay buffer a circular buffer */
- if (Offset >= size) {
- Offset = 0;
+ /* Right channel */
+ temp = (LVM_FLOAT)(*dst - (LVM_FLOAT)delay[Offset]) / 2.0f;
+ *dst = temp;
+ dst++;
+
+ delay[Offset] = *src;
+ Offset++;
+ src++;
+
+ /* Make the reverb delay buffer a circular buffer */
+ if (Offset >= size) {
+ Offset = 0;
+ }
}
}
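DelayMix_Float() now takes an explicit NrChannels argument: with FCC_1 it mixes a single delayed sample per frame, otherwise it keeps the original left-add/right-subtract stereo behaviour. A sketch of the updated call, modeled on the LVCS_ReverbGenerator.cpp hunk further down; the wrapper, buffer names and sizes are illustrative and assume the LVM headers:

    // Illustrative mono call: 'out' already holds the signal the delayed samples are mixed into.
    void applyReverbDelayMono(const LVM_FLOAT* filtered, LVM_FLOAT* delayLine, LVM_INT16 delaySize,
                              LVM_FLOAT* out, LVM_INT16* delayOffset, LVM_INT16 nrFrames) {
        DelayMix_Float(filtered, delayLine, delaySize, out, delayOffset,
                       nrFrames, /*NrChannels=*/FCC_1);
    }
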
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_2i_D16C31_SAT.cpp b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_2i_D16C31_SAT.cpp
index 58bc06e..b0aa172 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_2i_D16C31_SAT.cpp
+++ b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_2i_D16C31_SAT.cpp
@@ -56,10 +56,11 @@
Mix_Private_FLOAT_st* pInstance[NrChannels];
if (audio_channel_mask_get_representation(ChMask) == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
- for (int i = 0; i < 2; i++) {
+ int loopLimit = (NrChannels == FCC_1) ? NrChannels : FCC_2;
+ for (int i = 0; i < loopLimit; i++) {
pInstance[i] = pMixPrivInst[i];
}
- for (int i = 2; i < NrChannels; i++) {
+ for (int i = loopLimit; i < NrChannels; i++) {
pInstance[i] = pMixPrivInst[2];
}
} else {
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.cpp b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.cpp
index 3ab6afb..7e5caed 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.cpp
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.cpp
@@ -311,9 +311,8 @@
/*
* Create biquad instance
*/
- pInstance->eqBiquad.resize(
- pParams->NBands, android::audio_utils::BiquadFilter<LVM_FLOAT>(
- (FCC_1 == pParams->NrChannels) ? FCC_2 : pParams->NrChannels));
+ pInstance->eqBiquad.resize(pParams->NBands,
+ android::audio_utils::BiquadFilter<LVM_FLOAT>(pParams->NrChannels));
LVEQNB_ClearFilterHistory(pInstance);
if (bChange || modeChange) {
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Init.cpp b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Init.cpp
index 833ee5d..37e6d4d 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Init.cpp
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Init.cpp
@@ -52,10 +52,7 @@
LVEQNB_Capabilities_t* pCapabilities, void* pScratch) {
LVEQNB_Instance_t* pInstance;
- *phInstance = calloc(1, sizeof(*pInstance));
- if (phInstance == LVM_NULL) {
- return LVEQNB_NULLADDRESS;
- }
+ *phInstance = new LVEQNB_Instance_t;
pInstance = (LVEQNB_Instance_t*)*phInstance;
pInstance->Capabilities = *pCapabilities;
@@ -146,6 +143,6 @@
free(pInstance->pBiquadType);
pInstance->pBiquadType = LVM_NULL;
}
- free(pInstance);
+ delete pInstance;
*phInstance = LVM_NULL;
}
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Process.cpp b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Process.cpp
index 8992803..b177dd4 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Process.cpp
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Process.cpp
@@ -62,10 +62,7 @@
LVEQNB_Handle_t hInstance, const LVM_FLOAT* pInData, LVM_FLOAT* pOutData,
const LVM_UINT16 NrFrames) { // updated to use samples = frames * channels.
LVEQNB_Instance_t* pInstance = (LVEQNB_Instance_t*)hInstance;
-
- // Mono passed in as stereo
- const LVM_INT32 NrChannels =
- pInstance->Params.NrChannels == 1 ? 2 : pInstance->Params.NrChannels;
+ const LVM_INT32 NrChannels = pInstance->Params.NrChannels;
const LVM_INT32 NrSamples = NrChannels * NrFrames;
/* Check for NULL pointers */
@@ -104,7 +101,6 @@
* Check if band is non-zero dB gain
*/
if (pInstance->pBandDefinitions[i].Gain != 0) {
-
/*
* Select single or double precision as required
*/
diff --git a/media/libeffects/lvm/lib/Reverb/lib/LVREV.h b/media/libeffects/lvm/lib/Reverb/lib/LVREV.h
index 484787a..489bc6f 100644
--- a/media/libeffects/lvm/lib/Reverb/lib/LVREV.h
+++ b/media/libeffects/lvm/lib/Reverb/lib/LVREV.h
@@ -191,6 +191,23 @@
/****************************************************************************************/
/* */
+/* FUNCTION: LVREV_FreeInstance */
+/* */
+/* DESCRIPTION: */
+/* This function is used to free the internal allocations of the module. */
+/* */
+/* PARAMETERS: */
+/* hInstance Instance handle */
+/* */
+/* RETURNS: */
+/* LVREV_SUCCESS free instance succeeded */
+/* LVREV_NULLADDRESS Instance is NULL */
+/* */
+/****************************************************************************************/
+LVREV_ReturnStatus_en LVREV_FreeInstance(LVREV_Handle_t hInstance);
+
+/****************************************************************************************/
+/* */
/* FUNCTION: LVXX_GetControlParameters */
/* */
/* DESCRIPTION: */
diff --git a/media/libeffects/lvm/lib/Reverb/src/LVREV_GetInstanceHandle.cpp b/media/libeffects/lvm/lib/Reverb/src/LVREV_GetInstanceHandle.cpp
index 9a797bd..bf71634 100644
--- a/media/libeffects/lvm/lib/Reverb/src/LVREV_GetInstanceHandle.cpp
+++ b/media/libeffects/lvm/lib/Reverb/src/LVREV_GetInstanceHandle.cpp
@@ -114,7 +114,7 @@
* Set the instance handle if not already initialised
*/
if (*phInstance == LVM_NULL) {
- *phInstance = InstAlloc_AddMember(&SlowData, sizeof(LVREV_Instance_st));
+ *phInstance = new LVREV_Instance_st;
}
pLVREV_Private = (LVREV_Instance_st*)*phInstance;
pLVREV_Private->MemoryTable = *pMemoryTable;
@@ -269,4 +269,27 @@
return LVREV_SUCCESS;
}
+/****************************************************************************************/
+/* */
+/* FUNCTION: LVREV_FreeInstance */
+/* */
+/* DESCRIPTION: */
+/* This function is used to free the internal allocations of the module. */
+/* */
+/* PARAMETERS: */
+/* hInstance Instance handle */
+/* */
+/* RETURNS: */
+/* LVREV_SUCCESS free instance succeeded */
+/* LVREV_NULLADDRESS Instance is NULL */
+/* */
+/****************************************************************************************/
+LVREV_ReturnStatus_en LVREV_FreeInstance(LVREV_Handle_t hInstance) {
+ if (hInstance == LVM_NULL) {
+ return LVREV_NULLADDRESS;
+ }
+
+ delete (LVREV_Instance_st*)hInstance;
+ return LVREV_SUCCESS;
+}
/* End of file */
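With the reverb instance now allocated with new, LVREV_FreeInstance() becomes the required counterpart to LVREV_GetInstanceHandle(); Reverb_free() in EffectReverb.cpp below is updated to call it. A lifecycle sketch, assuming the declarations in LVREV.h (parameter setup is elided, and the exact argument order of LVREV_GetInstanceHandle() should be checked against that header):

    // Illustrative create/use/free pairing for the LVREV module after this change.
    void reverbLifecycle(LVREV_MemoryTable_st* memTab, LVREV_InstanceParams_st* instParams) {
        LVREV_Handle_t handle = LVM_NULL;
        if (LVREV_GetInstanceHandle(&handle, memTab, instParams) != LVREV_SUCCESS) {
            return;
        }
        // ... LVREV_SetControlParameters() / LVREV_Process() as before ...
        LVREV_FreeInstance(handle);   // releases the LVREV_Instance_st allocated with new
    }
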
diff --git a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Init.cpp b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Init.cpp
index 9874dcc..5ca8543 100644
--- a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Init.cpp
+++ b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Init.cpp
@@ -49,10 +49,7 @@
LVM_UINT32 BufferLength = 0;
/* Set the instance handle if not already initialised */
- *phInstance = calloc(1, sizeof(*pLVPSA_Inst));
- if (*phInstance == LVM_NULL) {
- return LVPSA_ERROR_NULLADDRESS;
- }
+ *phInstance = new LVPSA_InstancePr_t;
pLVPSA_Inst = (LVPSA_InstancePr_t*)*phInstance;
pLVPSA_Inst->pScratch = pScratch;
@@ -191,6 +188,6 @@
free(pLVPSA_Inst->pQPD_Taps);
pLVPSA_Inst->pQPD_Taps = LVM_NULL;
}
- free(pLVPSA_Inst);
+ delete pLVPSA_Inst;
*phInstance = LVM_NULL;
}
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_BypassMix.cpp b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_BypassMix.cpp
index efca27d..f805fca 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_BypassMix.cpp
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_BypassMix.cpp
@@ -200,6 +200,8 @@
LVM_UINT16 NumSamples) {
LVCS_Instance_t* pInstance = (LVCS_Instance_t*)hInstance;
LVCS_BypassMix_t* pConfig = (LVCS_BypassMix_t*)&pInstance->BypassMix;
+ LVM_UINT16 destNumSamples =
+ (pInstance->Params.NrChannels == FCC_1) ? NumSamples : FCC_2 * NumSamples;
/*
* Check if the bypass mixer is enabled
@@ -209,12 +211,12 @@
* Apply the bypass mix
*/
LVC_MixSoft_2St_D16C31_SAT(&pConfig->Mixer_Instance, pProcessed, (LVM_FLOAT*)pUnprocessed,
- pOutData, (LVM_INT16)(2 * NumSamples));
+ pOutData, (LVM_INT16)destNumSamples);
/*
* Apply output gain correction shift
*/
Shift_Sat_Float((LVM_INT16)pConfig->Output_Shift, (LVM_FLOAT*)pOutData,
- (LVM_FLOAT*)pOutData, (LVM_INT16)(2 * NumSamples)); /* Left and right*/
+ (LVM_FLOAT*)pOutData, (LVM_INT16)destNumSamples);
}
return (LVCS_SUCCESS);
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Control.cpp b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Control.cpp
index 8f88986..89f2f3b 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Control.cpp
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Control.cpp
@@ -180,7 +180,9 @@
if (pInstance->bInOperatingModeTransition != LVM_TRUE) {
pInstance->bTimerDone = LVM_FALSE;
pInstance->TimerParams.TimeInMs =
- (LVM_INT16)(((pInstance->Reverberation.DelaySize << 2) /
+ (LVM_INT16)(((pInstance->Params.NrChannels == FCC_1
+ ? pInstance->Reverberation.DelaySize << 3
+ : pInstance->Reverberation.DelaySize << 2) /
pInstance->TimerParams.SamplingRate) +
1);
LVM_Timer_Init(&pInstance->TimerInstance, &pInstance->TimerParams);
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Equaliser.cpp b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Equaliser.cpp
index c8ad94e..1746786 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Equaliser.cpp
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Equaliser.cpp
@@ -74,7 +74,8 @@
pEqualiserCoefTable[Offset].A0, pEqualiserCoefTable[Offset].A1,
pEqualiserCoefTable[Offset].A2, -(pEqualiserCoefTable[Offset].B1),
-(pEqualiserCoefTable[Offset].B2)};
- pInstance->pEqBiquad.reset(new android::audio_utils::BiquadFilter<LVM_FLOAT>(FCC_2, coefs));
+ pInstance->pEqBiquad.reset(new android::audio_utils::BiquadFilter<LVM_FLOAT>(
+ (pParams->NrChannels == FCC_1) ? FCC_1 : FCC_2, coefs));
}
return (LVCS_SUCCESS);
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Init.cpp b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Init.cpp
index ba3202f..d60b360 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Init.cpp
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Init.cpp
@@ -55,10 +55,7 @@
* Create the instance handle if not already initialised
*/
if (*phInstance == LVM_NULL) {
- *phInstance = calloc(1, sizeof(*pInstance));
- }
- if (*phInstance == LVM_NULL) {
- return LVCS_NULLADDRESS;
+ *phInstance = new LVCS_Instance_t;
}
pInstance = (LVCS_Instance_t*)*phInstance;
@@ -123,7 +120,7 @@
if (pInstance == LVM_NULL) {
return;
}
- free(pInstance);
+ delete pInstance;
*phInstance = LVM_NULL;
return;
}
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.cpp b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.cpp
index d18f2c3..6af0f75 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.cpp
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.cpp
@@ -75,16 +75,6 @@
LVM_INT32 channels = pInstance->Params.NrChannels;
#define NrFrames NumSamples // alias for clarity
- /*In case of mono processing, stereo input is created from mono
- *and stored in pInData before applying any of the effects.
- *However we do not update the value pInstance->Params.NrChannels
- *at this point.
- *So to treat the pInData as stereo we are setting channels to 2
- */
- if (channels == 1) {
- channels = 2;
- }
-
pScratch = (LVM_FLOAT*)pInstance->pScratch;
/*
@@ -97,11 +87,16 @@
*/
pInput = pScratch + (2 * NrFrames);
pStIn = pScratch + ((LVCS_SCRATCHBUFFERS - 2) * NrFrames);
- /* The first two channel data is extracted from the input data and
- * copied into pInput buffer
- */
- Copy_Float_Mc_Stereo((LVM_FLOAT*)pInData, (LVM_FLOAT*)pInput, NrFrames, channels);
- Copy_Float((LVM_FLOAT*)pInput, (LVM_FLOAT*)pStIn, (LVM_INT16)(2 * NrFrames));
+ if (channels == FCC_1) {
+ Copy_Float((LVM_FLOAT*)pInData, (LVM_FLOAT*)pInput, (LVM_INT16)NrFrames);
+ Copy_Float((LVM_FLOAT*)pInput, (LVM_FLOAT*)pStIn, (LVM_INT16)NrFrames);
+ } else {
+ /* The first two channel data is extracted from the input data and
+ * copied into pInput buffer
+ */
+ Copy_Float_Mc_Stereo((LVM_FLOAT*)pInData, (LVM_FLOAT*)pInput, NrFrames, channels);
+ Copy_Float((LVM_FLOAT*)pInput, (LVM_FLOAT*)pStIn, (LVM_INT16)(FCC_2 * NrFrames));
+ }
/*
* Call the stereo enhancer
*/
@@ -172,10 +167,10 @@
LVCS_ReturnStatus_en err;
/*Extract number of Channels info*/
LVM_INT32 channels = pInstance->Params.NrChannels;
+ LVM_UINT16 destNumSamples = (channels == FCC_1) ? NumSamples : FCC_2 * NumSamples;
+ LVM_INT32 compGainInterval =
+ (channels == FCC_1) ? LVCS_COMPGAINFRAME : FCC_2 * LVCS_COMPGAINFRAME;
#define NrFrames NumSamples // alias for clarity
- if (channels == 1) {
- channels = 2;
- }
/*
* Check the number of samples is not too large
*/
@@ -227,7 +222,7 @@
if (NumSamples < LVCS_COMPGAINFRAME) {
NonLinComp_Float(Gain, /* Compressor gain setting */
- pStereoOut, pStereoOut, (LVM_INT32)(2 * NrFrames));
+ pStereoOut, pStereoOut, (LVM_INT32)destNumSamples);
} else {
LVM_FLOAT GainStep;
LVM_FLOAT FinalGain;
@@ -266,12 +261,15 @@
if (SampleToProcess > LVCS_COMPGAINFRAME) {
NonLinComp_Float(Gain, /* Compressor gain setting */
- pOutPtr, pOutPtr, (LVM_INT32)(2 * LVCS_COMPGAINFRAME));
- pOutPtr += (2 * LVCS_COMPGAINFRAME);
+ pOutPtr, pOutPtr, compGainInterval);
+ pOutPtr += compGainInterval;
SampleToProcess = (LVM_INT16)(SampleToProcess - LVCS_COMPGAINFRAME);
} else {
NonLinComp_Float(Gain, /* Compressor gain setting */
- pOutPtr, pOutPtr, (LVM_INT32)(2 * SampleToProcess));
+ pOutPtr, pOutPtr,
+ (channels == FCC_1)
+ ? (LVM_INT32)(SampleToProcess)
+ : (LVM_INT32)(FCC_2 * SampleToProcess));
SampleToProcess = 0;
}
}
@@ -297,7 +295,7 @@
LVM_Timer(&pInstance->TimerInstance, (LVM_INT16)NumSamples);
}
}
- Copy_Float_Stereo_Mc(pInData, pStereoOut, pOutData, NrFrames, channels);
+ Copy_Float_Stereo_Mc(pInData, (const LVM_FLOAT*)pStereoOut, pOutData, NrFrames, channels);
} else {
if (pInData != pOutData) {
/*
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.cpp b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.cpp
index 15acda9..12b1dc3 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.cpp
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.cpp
@@ -65,7 +65,6 @@
LVCS_ReverbGenerator_t* pConfig = (LVCS_ReverbGenerator_t*)&pInstance->Reverberation;
const BiquadA012B12CoefsSP_t* pReverbCoefTable;
-
/*
* Initialise the delay and filters if:
* - the sample rate has changed
@@ -79,7 +78,8 @@
*/
Delay = (LVM_UINT16)LVCS_StereoDelayCS[(LVM_UINT16)pParams->SampleRate];
- pConfig->DelaySize = (LVM_INT16)(2 * Delay);
+ pConfig->DelaySize =
+ (pParams->NrChannels == FCC_1) ? (LVM_INT16)Delay : (LVM_INT16)(FCC_2 * Delay);
pConfig->DelayOffset = 0;
LoadConst_Float(0, /* Value */
(LVM_FLOAT*)&pConfig->StereoSamples[0], /* Destination */
@@ -95,8 +95,8 @@
pReverbCoefTable[Offset].A0, pReverbCoefTable[Offset].A1,
pReverbCoefTable[Offset].A2, -(pReverbCoefTable[Offset].B1),
-(pReverbCoefTable[Offset].B2)};
- pInstance->pRevBiquad.reset(
- new android::audio_utils::BiquadFilter<LVM_FLOAT>(FCC_2, coefs));
+ pInstance->pRevBiquad.reset(new android::audio_utils::BiquadFilter<LVM_FLOAT>(
+ (pParams->NrChannels == FCC_1) ? FCC_1 : FCC_2, coefs));
/*
* Setup the mixer
@@ -155,6 +155,9 @@
LVCS_Instance_t* pInstance = (LVCS_Instance_t*)hInstance;
LVCS_ReverbGenerator_t* pConfig = (LVCS_ReverbGenerator_t*)&pInstance->Reverberation;
LVM_FLOAT* pScratch;
+ LVM_INT32 NumChannels = pInstance->Params.NrChannels;
+ LVM_UINT16 destNumSamples =
+ (pInstance->Params.NrChannels == FCC_1) ? NumSamples : FCC_2 * NumSamples;
pScratch = (LVM_FLOAT*)pInstance->pScratch;
@@ -165,9 +168,9 @@
/*
* Reverb not required so just copy the data
*/
- Copy_Float((LVM_FLOAT*)pInData, /* Source */
- (LVM_FLOAT*)pOutData, /* Destination */
- (LVM_INT16)(2 * NumSamples)); /* Left and right */
+ Copy_Float((LVM_FLOAT*)pInData, /* Source */
+ (LVM_FLOAT*)pOutData, /* Destination */
+ (LVM_INT16)destNumSamples); /* Number of frames */
}
/*
@@ -188,9 +191,9 @@
/*
* Copy the input data to the scratch memory
*/
- Copy_Float((LVM_FLOAT*)pInData, /* Source */
- (LVM_FLOAT*)pScratch, /* Destination */
- (LVM_INT16)(2 * NumSamples)); /* Left and right */
+ Copy_Float((LVM_FLOAT*)pInData, /* Source */
+ (LVM_FLOAT*)pScratch, /* Destination */
+ (LVM_INT16)destNumSamples); /* Number of frames */
/*
* Filter the data
@@ -198,13 +201,13 @@
pInstance->pRevBiquad->process(pScratch, pScratch, NumSamples);
Mult3s_Float((LVM_FLOAT*)pScratch, pConfig->ReverbLevel, (LVM_FLOAT*)pScratch,
- (LVM_INT16)(2 * NumSamples));
+ (LVM_INT16)destNumSamples); /* Number of frames */
/*
* Apply the delay mix
*/
DelayMix_Float((LVM_FLOAT*)pScratch, &pConfig->StereoSamples[0], pConfig->DelaySize,
- pOutData, &pConfig->DelayOffset, (LVM_INT16)NumSamples);
+ pOutData, &pConfig->DelayOffset, (LVM_INT16)NumSamples, NumChannels);
}
return (LVCS_SUCCESS);
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_StereoEnhancer.cpp b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_StereoEnhancer.cpp
index 00bb26c..e3ff604 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_StereoEnhancer.cpp
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_StereoEnhancer.cpp
@@ -55,7 +55,6 @@
LVCS_Instance_t* pInstance = (LVCS_Instance_t*)hInstance;
const BiquadA012B12CoefsSP_t* pSESideCoefs;
-
/*
* If the sample rate or speaker type has changed update the filters
*/
@@ -129,6 +128,8 @@
LVCS_StereoEnhancer_t* pConfig = (LVCS_StereoEnhancer_t*)&pInstance->StereoEnhancer;
LVM_FLOAT* pScratch;
pScratch = (LVM_FLOAT*)pInstance->pScratch;
+ LVM_INT32 NumChannels = pInstance->Params.NrChannels;
+ LVM_UINT16 destNumSamples = (NumChannels == FCC_1) ? NumSamples : FCC_2 * NumSamples;
/*
* Check if the Stereo Enhancer is enabled
*/
@@ -136,7 +137,12 @@
/*
* Convert from stereo to middle and side
*/
- From2iToMS_Float(pInData, pScratch, pScratch + NumSamples, (LVM_INT16)NumSamples);
+ if (NumChannels == 1) {
+ // Copy same input to scratch as Middle data
+ Copy_Float((LVM_FLOAT*)pInData, (LVM_FLOAT*)pScratch, (LVM_INT16)NumSamples);
+ } else {
+ From2iToMS_Float(pInData, pScratch, pScratch + NumSamples, (LVM_INT16)NumSamples);
+ }
/*
* Apply filter to the middle signal
@@ -159,18 +165,23 @@
NumSamples);
}
- /*
- * Convert from middle and side to stereo
- */
- MSTo2i_Sat_Float(pScratch, pScratch + NumSamples, pOutData, (LVM_INT16)NumSamples);
+ if (NumChannels == 1) {
+ // Copy processed Middle data from scratch to pOutData
+ Copy_Float((LVM_FLOAT*)pScratch, (LVM_FLOAT*)pOutData, (LVM_INT16)NumSamples);
+ } else {
+ /*
+ * Convert from middle and side to stereo
+ */
+ MSTo2i_Sat_Float(pScratch, pScratch + NumSamples, pOutData, (LVM_INT16)NumSamples);
+ }
} else {
/*
* The stereo enhancer is disabled so just copy the data
*/
- Copy_Float((LVM_FLOAT*)pInData, /* Source */
- (LVM_FLOAT*)pOutData, /* Destination */
- (LVM_INT16)(2 * NumSamples)); /* Left and right */
+ Copy_Float((LVM_FLOAT*)pInData, /* Source */
+ (LVM_FLOAT*)pOutData, /* Destination */
+ (LVM_INT16)destNumSamples); /* Number of frames */
}
return (LVCS_SUCCESS);
diff --git a/media/libeffects/lvm/tests/Android.bp b/media/libeffects/lvm/tests/Android.bp
index 8627c13..639af4d 100644
--- a/media/libeffects/lvm/tests/Android.bp
+++ b/media/libeffects/lvm/tests/Android.bp
@@ -10,6 +10,26 @@
}
cc_test {
+ name: "EffectBundleTest",
+ vendor: true,
+ gtest: true,
+ host_supported: true,
+ test_suites: ["device-tests"],
+ srcs: ["EffectBundleTest.cpp"],
+ static_libs: [
+ "libaudioutils",
+ "libbundlewrapper",
+ "libmusicbundle",
+ ],
+ shared_libs: [
+ "liblog",
+ ],
+ header_libs: [
+ "libhardware_headers",
+ ],
+}
+
+cc_test {
name: "lvmtest",
host_supported: false,
proprietary: true,
@@ -54,8 +74,9 @@
cc_test {
name: "reverb_test",
- host_supported: false,
+ host_supported: true,
proprietary: true,
+ gtest: false,
include_dirs: [
"frameworks/av/media/libeffects/lvm/wrapper/Reverb",
diff --git a/media/libeffects/lvm/tests/EffectBundleTest.cpp b/media/libeffects/lvm/tests/EffectBundleTest.cpp
new file mode 100644
index 0000000..aae09de
--- /dev/null
+++ b/media/libeffects/lvm/tests/EffectBundleTest.cpp
@@ -0,0 +1,335 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <array>
+#include <audio_utils/channels.h>
+#include <audio_utils/primitives.h>
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <gtest/gtest.h>
+#include <hardware/audio_effect.h>
+#include <log/log.h>
+#include <random>
+#include <system/audio.h>
+#include <vector>
+
+extern audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM;
+
+// Corresponds to SNR for 1 bit difference between two int16_t signals
+constexpr float kSNRThreshold = 90.308998;
+
+// Update isBassBoost, if the order of effects is updated
+constexpr effect_uuid_t kEffectUuids[] = {
+ // NXP SW BassBoost
+ {0x8631f300, 0x72e2, 0x11df, 0xb57e, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+ // NXP SW Virtualizer
+ {0x1d4033c0, 0x8557, 0x11df, 0x9f2d, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+ // NXP SW Equalizer
+ {0xce772f20, 0x847d, 0x11df, 0xbb17, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+ // NXP SW Volume
+ {0x119341a0, 0x8469, 0x11df, 0x81f9, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+};
+
+static bool isBassBoost(const effect_uuid_t* uuid) {
+ // Update this, if the order of effects in kEffectUuids is updated
+ return uuid == &kEffectUuids[0];
+}
+
+constexpr size_t kNumEffectUuids = std::size(kEffectUuids);
+
+constexpr audio_channel_mask_t kChMasks[] = {
+ AUDIO_CHANNEL_OUT_MONO, AUDIO_CHANNEL_OUT_STEREO,
+ AUDIO_CHANNEL_OUT_2POINT1, AUDIO_CHANNEL_OUT_2POINT0POINT2,
+ AUDIO_CHANNEL_OUT_QUAD, AUDIO_CHANNEL_OUT_QUAD_BACK,
+ AUDIO_CHANNEL_OUT_QUAD_SIDE, AUDIO_CHANNEL_OUT_SURROUND,
+ AUDIO_CHANNEL_INDEX_MASK_4, AUDIO_CHANNEL_OUT_2POINT1POINT2,
+ AUDIO_CHANNEL_OUT_3POINT0POINT2, AUDIO_CHANNEL_OUT_PENTA,
+ AUDIO_CHANNEL_INDEX_MASK_5, AUDIO_CHANNEL_OUT_3POINT1POINT2,
+ AUDIO_CHANNEL_OUT_5POINT1, AUDIO_CHANNEL_OUT_5POINT1_BACK,
+ AUDIO_CHANNEL_OUT_5POINT1_SIDE, AUDIO_CHANNEL_INDEX_MASK_6,
+ AUDIO_CHANNEL_OUT_6POINT1, AUDIO_CHANNEL_INDEX_MASK_7,
+ AUDIO_CHANNEL_OUT_5POINT1POINT2, AUDIO_CHANNEL_OUT_7POINT1,
+ AUDIO_CHANNEL_INDEX_MASK_8, AUDIO_CHANNEL_INDEX_MASK_9,
+ AUDIO_CHANNEL_INDEX_MASK_10, AUDIO_CHANNEL_INDEX_MASK_11,
+ AUDIO_CHANNEL_INDEX_MASK_12, AUDIO_CHANNEL_INDEX_MASK_13,
+ AUDIO_CHANNEL_INDEX_MASK_14, AUDIO_CHANNEL_INDEX_MASK_15,
+ AUDIO_CHANNEL_INDEX_MASK_16, AUDIO_CHANNEL_INDEX_MASK_17,
+ AUDIO_CHANNEL_INDEX_MASK_18, AUDIO_CHANNEL_INDEX_MASK_19,
+ AUDIO_CHANNEL_INDEX_MASK_20, AUDIO_CHANNEL_INDEX_MASK_21,
+ AUDIO_CHANNEL_INDEX_MASK_22, AUDIO_CHANNEL_INDEX_MASK_23,
+ AUDIO_CHANNEL_INDEX_MASK_24,
+};
+
+constexpr size_t kNumChMasks = std::size(kChMasks);
+
+constexpr size_t kSampleRates[] = {8000, 11025, 12000, 16000, 22050, 24000, 32000,
+ 44100, 48000, 88200, 96000, 176400, 192000};
+
+constexpr size_t kNumSampleRates = std::size(kSampleRates);
+
+constexpr size_t kFrameCounts[] = {4, 2048};
+
+constexpr size_t kNumFrameCounts = std::size(kFrameCounts);
+
+constexpr size_t kLoopCounts[] = {1, 4};
+
+constexpr size_t kNumLoopCounts = std::size(kLoopCounts);
+
+class EffectBundleHelper {
+ public:
+ EffectBundleHelper(const effect_uuid_t* uuid, size_t chMask, size_t sampleRate,
+ size_t frameCount, size_t loopCount)
+ : mUuid(uuid),
+ mChMask(chMask),
+ mChannelCount(audio_channel_count_from_out_mask(mChMask)),
+ mSampleRate(sampleRate),
+ mFrameCount(frameCount),
+ mLoopCount(loopCount) {}
+ void createEffect();
+ void releaseEffect();
+ void configEffect();
+ void process(float* input, float* output);
+
+ private:
+ const effect_uuid_t* mUuid;
+ const size_t mChMask;
+ const size_t mChannelCount;
+ const size_t mSampleRate;
+ const size_t mFrameCount;
+ const size_t mLoopCount;
+ effect_handle_t mEffectHandle{};
+};
+
+void EffectBundleHelper::createEffect() {
+ int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.create_effect(mUuid, 1, 1, &mEffectHandle);
+ ASSERT_EQ(status, 0) << "create_effect returned an error " << status << "\n";
+}
+
+void EffectBundleHelper::releaseEffect() {
+ int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(mEffectHandle);
+ ASSERT_EQ(status, 0) << "release_effect returned an error " << status << "\n";
+}
+
+void EffectBundleHelper::configEffect() {
+ effect_config_t config{};
+ config.inputCfg.samplingRate = config.outputCfg.samplingRate = mSampleRate;
+ config.inputCfg.channels = config.outputCfg.channels = mChMask;
+ config.inputCfg.format = config.outputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
+
+ int reply = 0;
+ uint32_t replySize = sizeof(reply);
+ int status = (*mEffectHandle)
+ ->command(mEffectHandle, EFFECT_CMD_SET_CONFIG, sizeof(effect_config_t),
+ &config, &replySize, &reply);
+ ASSERT_EQ(status, 0) << "command returned an error " << status << "\n";
+ ASSERT_EQ(reply, 0) << "command reply non zero " << reply << "\n";
+
+ status = (*mEffectHandle)
+ ->command(mEffectHandle, EFFECT_CMD_ENABLE, 0, nullptr, &replySize, &reply);
+ ASSERT_EQ(status, 0) << "command enable returned an error " << status << "\n";
+ ASSERT_EQ(reply, 0) << "command reply non zero " << reply << "\n";
+}
+
+void EffectBundleHelper::process(float* input, float* output) {
+ audio_buffer_t inBuffer = {.frameCount = mFrameCount, .f32 = input};
+ audio_buffer_t outBuffer = {.frameCount = mFrameCount, .f32 = output};
+ for (size_t i = 0; i < mLoopCount; i++) {
+ int status = (*mEffectHandle)->process(mEffectHandle, &inBuffer, &outBuffer);
+ ASSERT_EQ(status, 0) << "process returned an error " << status << "\n";
+
+ inBuffer.f32 += mFrameCount * mChannelCount;
+ outBuffer.f32 += mFrameCount * mChannelCount;
+ }
+}
+
+typedef std::tuple<int, int, int, int, int> SingleEffectTestParam;
+class SingleEffectTest : public ::testing::TestWithParam<SingleEffectTestParam> {
+ public:
+ SingleEffectTest()
+ : mChMask(kChMasks[std::get<0>(GetParam())]),
+ mChannelCount(audio_channel_count_from_out_mask(mChMask)),
+ mSampleRate(kSampleRates[std::get<1>(GetParam())]),
+ mFrameCount(kFrameCounts[std::get<2>(GetParam())]),
+ mLoopCount(kLoopCounts[std::get<3>(GetParam())]),
+ mTotalFrameCount(mFrameCount * mLoopCount),
+ mUuid(&kEffectUuids[std::get<4>(GetParam())]) {}
+
+ const size_t mChMask;
+ const size_t mChannelCount;
+ const size_t mSampleRate;
+ const size_t mFrameCount;
+ const size_t mLoopCount;
+ const size_t mTotalFrameCount;
+ const effect_uuid_t* mUuid;
+};
+
+// Tests applying a single effect
+TEST_P(SingleEffectTest, SimpleProcess) {
+ SCOPED_TRACE(testing::Message()
+ << "chMask: " << mChMask << " sampleRate: " << mSampleRate
+ << " frameCount: " << mFrameCount << " loopCount: " << mLoopCount);
+
+ EffectBundleHelper effect(mUuid, mChMask, mSampleRate, mFrameCount, mLoopCount);
+
+ ASSERT_NO_FATAL_FAILURE(effect.createEffect());
+ ASSERT_NO_FATAL_FAILURE(effect.configEffect());
+
+ // Initialize input buffer with deterministic pseudo-random values
+ std::vector<float> input(mTotalFrameCount * mChannelCount);
+ std::vector<float> output(mTotalFrameCount * mChannelCount);
+ std::minstd_rand gen(mChMask);
+ std::uniform_real_distribution<> dis(-1.0f, 1.0f);
+ for (auto& in : input) {
+ in = dis(gen);
+ }
+ ASSERT_NO_FATAL_FAILURE(effect.process(input.data(), output.data()));
+ ASSERT_NO_FATAL_FAILURE(effect.releaseEffect());
+}
+
+INSTANTIATE_TEST_SUITE_P(EffectBundleTestAll, SingleEffectTest,
+ ::testing::Combine(::testing::Range(0, (int)kNumChMasks),
+ ::testing::Range(0, (int)kNumSampleRates),
+ ::testing::Range(0, (int)kNumFrameCounts),
+ ::testing::Range(0, (int)kNumLoopCounts),
+ ::testing::Range(0, (int)kNumEffectUuids)));
+
+typedef std::tuple<int, int, int, int> SingleEffectComparisonTestParam;
+class SingleEffectComparisonTest
+ : public ::testing::TestWithParam<SingleEffectComparisonTestParam> {
+ public:
+ SingleEffectComparisonTest()
+ : mSampleRate(kSampleRates[std::get<0>(GetParam())]),
+ mFrameCount(kFrameCounts[std::get<1>(GetParam())]),
+ mLoopCount(kLoopCounts[std::get<2>(GetParam())]),
+ mTotalFrameCount(mFrameCount * mLoopCount),
+ mUuid(&kEffectUuids[std::get<3>(GetParam())]) {}
+
+ const size_t mSampleRate;
+ const size_t mFrameCount;
+ const size_t mLoopCount;
+ const size_t mTotalFrameCount;
+ const effect_uuid_t* mUuid;
+};
+
+template <typename T>
+float computeSnr(const T* ref, const T* tst, size_t count) {
+ double signal{};
+ double noise{};
+
+ for (size_t i = 0; i < count; ++i) {
+ const double value(ref[i]);
+ const double diff(tst[i] - value);
+ signal += value * value;
+ noise += diff * diff;
+ }
+ // Initialized to a value greater than kSNRThreshold to handle
+ // cases where ref and tst match exactly
+ float snr = kSNRThreshold + 1.0f;
+ if (signal > 0.0f && noise > 0.0f) {
+        snr = 10.f * log10(signal / noise);
+ }
+ return snr;
+}
+
+// Compares first two channels in multi-channel output to stereo output when same effect is applied
+TEST_P(SingleEffectComparisonTest, SimpleProcess) {
+ SCOPED_TRACE(testing::Message() << " sampleRate: " << mSampleRate << " frameCount: "
+ << mFrameCount << " loopCount: " << mLoopCount);
+
+ // Initialize mono input buffer with deterministic pseudo-random values
+ std::vector<float> monoInput(mTotalFrameCount);
+
+ std::minstd_rand gen(mSampleRate);
+ std::uniform_real_distribution<> dis(-1.0f, 1.0f);
+ for (auto& in : monoInput) {
+ in = dis(gen);
+ }
+
+ // Generate stereo by repeating mono channel data
+ std::vector<float> stereoInput(mTotalFrameCount * FCC_2);
+ adjust_channels(monoInput.data(), FCC_1, stereoInput.data(), FCC_2, sizeof(float),
+ mTotalFrameCount * sizeof(float) * FCC_1);
+
+ // Apply effect on stereo channels
+ EffectBundleHelper stereoEffect(mUuid, AUDIO_CHANNEL_OUT_STEREO, mSampleRate, mFrameCount,
+ mLoopCount);
+
+ ASSERT_NO_FATAL_FAILURE(stereoEffect.createEffect());
+ ASSERT_NO_FATAL_FAILURE(stereoEffect.configEffect());
+
+ std::vector<float> stereoOutput(mTotalFrameCount * FCC_2);
+ ASSERT_NO_FATAL_FAILURE(stereoEffect.process(stereoInput.data(), stereoOutput.data()));
+ ASSERT_NO_FATAL_FAILURE(stereoEffect.releaseEffect());
+
+ // Convert stereo float data to stereo int16_t to be used as reference
+ std::vector<int16_t> stereoRefI16(mTotalFrameCount * FCC_2);
+ memcpy_to_i16_from_float(stereoRefI16.data(), stereoOutput.data(), mTotalFrameCount * FCC_2);
+
+ for (size_t chMask : kChMasks) {
+ size_t channelCount = audio_channel_count_from_out_mask(chMask);
+ EffectBundleHelper testEffect(mUuid, chMask, mSampleRate, mFrameCount, mLoopCount);
+
+ ASSERT_NO_FATAL_FAILURE(testEffect.createEffect());
+ ASSERT_NO_FATAL_FAILURE(testEffect.configEffect());
+
+ std::vector<float> testInput(mTotalFrameCount * channelCount);
+
+ // Repeat mono channel data to all the channels
+ // adjust_channels() zero fills channels > 2, hence can't be used here
+ for (size_t i = 0; i < mTotalFrameCount; ++i) {
+ auto* fp = &testInput[i * channelCount];
+ std::fill(fp, fp + channelCount, monoInput[i]);
+ }
+
+ std::vector<float> testOutput(mTotalFrameCount * channelCount);
+ ASSERT_NO_FATAL_FAILURE(testEffect.process(testInput.data(), testOutput.data()));
+ ASSERT_NO_FATAL_FAILURE(testEffect.releaseEffect());
+
+ // Extract first two channels
+ std::vector<float> stereoTestOutput(mTotalFrameCount * FCC_2);
+ adjust_channels(testOutput.data(), channelCount, stereoTestOutput.data(), FCC_2,
+ sizeof(float), mTotalFrameCount * sizeof(float) * channelCount);
+
+ // Convert the test data to int16_t
+ std::vector<int16_t> stereoTestI16(mTotalFrameCount * FCC_2);
+ memcpy_to_i16_from_float(stereoTestI16.data(), stereoTestOutput.data(),
+ mTotalFrameCount * FCC_2);
+
+ if (isBassBoost(mUuid)) {
+ // SNR must be above the threshold
+ float snr = computeSnr<int16_t>(stereoRefI16.data(), stereoTestI16.data(),
+ mTotalFrameCount * FCC_2);
+            ASSERT_GT(snr, kSNRThreshold) << "SNR " << snr << " is lower than " << kSNRThreshold;
+ } else {
+            ASSERT_EQ(0, memcmp(stereoRefI16.data(), stereoTestI16.data(),
+                                mTotalFrameCount * sizeof(int16_t) * FCC_2))
+ << "First two channels do not match with stereo output \n";
+ }
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(EffectBundleTestAll, SingleEffectComparisonTest,
+ ::testing::Combine(::testing::Range(0, (int)kNumSampleRates),
+ ::testing::Range(0, (int)kNumFrameCounts),
+ ::testing::Range(0, (int)kNumLoopCounts),
+ ::testing::Range(0, (int)kNumEffectUuids)));
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ int status = RUN_ALL_TESTS();
+ ALOGV("Test result = %d\n", status);
+ return status;
+}
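The kSNRThreshold constant in the new test corresponds to a one-LSB error on a full-scale int16_t signal: 20 * log10(32767) is approximately 90.308998 dB. A standalone check of that arithmetic, not part of the test, shown only to document where the constant comes from:

    #include <cmath>
    #include <cstdio>

    int main() {
        const double fullScale = 32767.0;             // int16_t full scale
        const double oneLsbSnrDb = 20.0 * std::log10(fullScale / 1.0);
        std::printf("1-bit SNR for int16_t: %.6f dB\n", oneLsbSnrDb);  // ~90.308998
        return 0;
    }
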
diff --git a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
index 7b0ff5e..df7ca5a 100755
--- a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
+++ b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
@@ -102,6 +102,11 @@
((++error_count))
fi
+ # Do not compare cases where -vcBal is in flags and chMask is 0 (due to
+ # stereo computation)
+ if [[ $flags == *"-vcBal:"* ]] && [[ $chMask -eq 0 ]]; then
+ continue
+ fi
# two channel files should be identical to higher channel
# computation (first 2 channels).
diff --git a/media/libeffects/lvm/tests/lvmtest.cpp b/media/libeffects/lvm/tests/lvmtest.cpp
index f107b18..e484a1a 100644
--- a/media/libeffects/lvm/tests/lvmtest.cpp
+++ b/media/libeffects/lvm/tests/lvmtest.cpp
@@ -415,52 +415,11 @@
} else {
return -EINVAL;
}
-
- LVM_Fs_en sampleRate;
- switch (plvmConfigParams->samplingFreq) {
- case 8000:
- sampleRate = LVM_FS_8000;
- break;
- case 11025:
- sampleRate = LVM_FS_11025;
- break;
- case 12000:
- sampleRate = LVM_FS_12000;
- break;
- case 16000:
- sampleRate = LVM_FS_16000;
- break;
- case 22050:
- sampleRate = LVM_FS_22050;
- break;
- case 24000:
- sampleRate = LVM_FS_24000;
- break;
- case 32000:
- sampleRate = LVM_FS_32000;
- break;
- case 44100:
- sampleRate = LVM_FS_44100;
- break;
- case 48000:
- sampleRate = LVM_FS_48000;
- break;
- case 88200:
- sampleRate = LVM_FS_88200;
- break;
- case 96000:
- sampleRate = LVM_FS_96000;
- break;
- case 176400:
- sampleRate = LVM_FS_176400;
- break;
- case 192000:
- sampleRate = LVM_FS_192000;
- break;
- default:
- return -EINVAL;
+ params->SampleRate = lvmFsForSampleRate(plvmConfigParams->samplingFreq);
+ if (params->SampleRate == LVM_FS_INVALID) {
+ ALOGE("lvmControl invalid sampling rate %d", plvmConfigParams->samplingFreq);
+ return -EINVAL;
}
- params->SampleRate = sampleRate;
/* Concert Sound parameters */
params->VirtualizerOperatingMode = plvmConfigParams->csEnable;
@@ -530,19 +489,11 @@
const int ioChannelCount = plvmConfigParams->fChannels;
const int ioFrameSize = ioChannelCount * sizeof(short); // file load size
const int maxChannelCount = std::max(channelCount, ioChannelCount);
- /*
- * Mono input will be converted to 2 channels internally in the process call
- * by copying the same data into the second channel.
- * Hence when channelCount is 1, output buffer should be allocated for
- * 2 channels. The memAllocChCount takes care of allocation of sufficient
- * memory for the output buffer.
- */
- const int memAllocChCount = (channelCount == 1 ? 2 : channelCount);
std::vector<short> in(frameLength * maxChannelCount);
std::vector<short> out(frameLength * maxChannelCount);
std::vector<float> floatIn(frameLength * channelCount);
- std::vector<float> floatOut(frameLength * memAllocChCount);
+ std::vector<float> floatOut(frameLength * channelCount);
int frameCounter = 0;
while (fread(in.data(), ioFrameSize, frameLength, finp) == (size_t)frameLength) {
diff --git a/media/libeffects/lvm/wrapper/Android.bp b/media/libeffects/lvm/wrapper/Android.bp
index 09c4aef..e169e3c 100644
--- a/media/libeffects/lvm/wrapper/Android.bp
+++ b/media/libeffects/lvm/wrapper/Android.bp
@@ -67,6 +67,7 @@
},
vendor: true,
+ host_supported: true,
srcs: ["Reverb/EffectReverb.cpp"],
cppflags: [
@@ -83,7 +84,6 @@
shared_libs: [
"libaudioutils",
"libcutils",
- "libdl",
"liblog",
],
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 46dbf7e..9ccccb4 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -957,51 +957,12 @@
pContext->config = *pConfig;
const LVM_INT16 NrChannels = audio_channel_count_from_out_mask(pConfig->inputCfg.channels);
- switch (pConfig->inputCfg.samplingRate) {
- case 8000:
- SampleRate = LVM_FS_8000;
- pContext->pBundledContext->SamplesPerSecond = 8000 * NrChannels;
- break;
- case 16000:
- SampleRate = LVM_FS_16000;
- pContext->pBundledContext->SamplesPerSecond = 16000 * NrChannels;
- break;
- case 22050:
- SampleRate = LVM_FS_22050;
- pContext->pBundledContext->SamplesPerSecond = 22050 * NrChannels;
- break;
- case 32000:
- SampleRate = LVM_FS_32000;
- pContext->pBundledContext->SamplesPerSecond = 32000 * NrChannels;
- break;
- case 44100:
- SampleRate = LVM_FS_44100;
- pContext->pBundledContext->SamplesPerSecond = 44100 * NrChannels;
- break;
- case 48000:
- SampleRate = LVM_FS_48000;
- pContext->pBundledContext->SamplesPerSecond = 48000 * NrChannels;
- break;
- case 88200:
- SampleRate = LVM_FS_88200;
- pContext->pBundledContext->SamplesPerSecond = 88200 * NrChannels;
- break;
- case 96000:
- SampleRate = LVM_FS_96000;
- pContext->pBundledContext->SamplesPerSecond = 96000 * NrChannels;
- break;
- case 176400:
- SampleRate = LVM_FS_176400;
- pContext->pBundledContext->SamplesPerSecond = 176400 * NrChannels;
- break;
- case 192000:
- SampleRate = LVM_FS_192000;
- pContext->pBundledContext->SamplesPerSecond = 192000 * NrChannels;
- break;
- default:
- ALOGV("\tEffect_setConfig invalid sampling rate %d", pConfig->inputCfg.samplingRate);
- return -EINVAL;
+ SampleRate = lvmFsForSampleRate(pConfig->inputCfg.samplingRate);
+ if (SampleRate == LVM_FS_INVALID) {
+ ALOGV("Effect_setConfig invalid sampling rate %d", pConfig->inputCfg.samplingRate);
+ return -EINVAL;
}
+ pContext->pBundledContext->SamplesPerSecond = pConfig->inputCfg.samplingRate * NrChannels;
if (pContext->pBundledContext->SampleRate != SampleRate ||
pContext->pBundledContext->ChMask != pConfig->inputCfg.channels) {
diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
index 9ea70ce..4489e81 100644
--- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
@@ -190,8 +190,8 @@
/* Effect Library Interface Implementation */
-extern "C" int EffectCreate(const effect_uuid_t* uuid, int32_t sessionId __unused,
- int32_t ioId __unused, effect_handle_t* pHandle) {
+extern "C" int EffectCreate(const effect_uuid_t* uuid, int32_t /* sessionId __unused */,
+ int32_t /* ioId __unused */, effect_handle_t* pHandle) {
int ret;
int i;
int length = sizeof(gDescriptors) / sizeof(const effect_descriptor_t*);
@@ -517,6 +517,9 @@
}
}
}
+
+ LvmStatus = LVREV_FreeInstance(pContext->hInstance);
+ LVM_ERROR_CHECK(LvmStatus, "LVREV_FreeInstance", "Reverb_free")
} /* end Reverb_free */
//----------------------------------------------------------------------------
@@ -553,40 +556,10 @@
// ALOGV("\tReverb_setConfig calling memcpy");
pContext->config = *pConfig;
- switch (pConfig->inputCfg.samplingRate) {
- case 8000:
- SampleRate = LVM_FS_8000;
- break;
- case 16000:
- SampleRate = LVM_FS_16000;
- break;
- case 22050:
- SampleRate = LVM_FS_22050;
- break;
- case 32000:
- SampleRate = LVM_FS_32000;
- break;
- case 44100:
- SampleRate = LVM_FS_44100;
- break;
- case 48000:
- SampleRate = LVM_FS_48000;
- break;
- case 88200:
- SampleRate = LVM_FS_88200;
- break;
- case 96000:
- SampleRate = LVM_FS_96000;
- break;
- case 176400:
- SampleRate = LVM_FS_176400;
- break;
- case 192000:
- SampleRate = LVM_FS_192000;
- break;
- default:
- ALOGV("\rReverb_setConfig invalid sampling rate %d", pConfig->inputCfg.samplingRate);
- return -EINVAL;
+ SampleRate = lvmFsForSampleRate(pConfig->inputCfg.samplingRate);
+ if (SampleRate == LVM_FS_INVALID) {
+ ALOGE("Reverb_setConfig invalid sampling rate %d", pConfig->inputCfg.samplingRate);
+ return -EINVAL;
}
if (pContext->SampleRate != SampleRate) {
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index ac86f72..154988d 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -66,6 +66,7 @@
ENABLE_AUDIO_DEVICE_CALLBACK,
GET_ACTIVE_MICROPHONES,
GET_PORT_ID,
+ GET_RTP_DATA_USAGE,
SET_PREFERRED_MICROPHONE_DIRECTION,
SET_PREFERRED_MICROPHONE_FIELD_DIMENSION,
SET_PRIVACY_SENSITIVE,
@@ -476,6 +477,23 @@
*portId = (audio_port_handle_t)reply.readInt32();
return NO_ERROR;
}
+
+ status_t getRtpDataUsage(uint64_t *bytes)
+ {
+ ALOGV("getRtpDataUsage");
+ if (bytes == nullptr) {
+ return BAD_VALUE;
+ }
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+ status_t status = remote()->transact(GET_RTP_DATA_USAGE, data, &reply);
+ if (status != OK
+ || (status = (status_t)reply.readInt32()) != NO_ERROR) {
+ *bytes = 0;
+ return status;
+ }
+ return reply.readUint64(bytes);
+ }
};
IMPLEMENT_META_INTERFACE(MediaRecorder, "android.media.IMediaRecorder");
@@ -759,6 +777,17 @@
}
return NO_ERROR;
}
+ case GET_RTP_DATA_USAGE: {
+ ALOGV("GET_RTP_DATA_USAGE");
+ CHECK_INTERFACE(IMediaRecorder, data, reply);
+ uint64_t bytes;
+ status_t status = getRtpDataUsage(&bytes);
+ reply->writeInt32(status);
+ if (status == NO_ERROR) {
+ reply->writeUint64(bytes);
+ }
+ return NO_ERROR;
+ }
case SET_PREFERRED_MICROPHONE_DIRECTION: {
ALOGV("SET_PREFERRED_MICROPHONE_DIRECTION");
CHECK_INTERFACE(IMediaRecorder, data, reply);
diff --git a/media/libmedia/include/media/IMediaRecorder.h b/media/libmedia/include/media/IMediaRecorder.h
index 651bd5e..6e69782 100644
--- a/media/libmedia/include/media/IMediaRecorder.h
+++ b/media/libmedia/include/media/IMediaRecorder.h
@@ -78,6 +78,7 @@
virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction) = 0;
virtual status_t setPreferredMicrophoneFieldDimension(float zoom) = 0;
virtual status_t getPortId(audio_port_handle_t *portId) = 0;
+ virtual status_t getRtpDataUsage(uint64_t *bytes) = 0;
};
// ----------------------------------------------------------------------------
diff --git a/media/libmedia/include/media/MediaRecorderBase.h b/media/libmedia/include/media/MediaRecorderBase.h
index 8493f64..d9a7efb 100644
--- a/media/libmedia/include/media/MediaRecorderBase.h
+++ b/media/libmedia/include/media/MediaRecorderBase.h
@@ -77,6 +77,7 @@
virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction) = 0;
virtual status_t setPreferredMicrophoneFieldDimension(float zoom) = 0;
virtual status_t getPortId(audio_port_handle_t *portId) const = 0;
+ virtual status_t getRtpDataUsage(uint64_t *bytes) = 0;
diff --git a/media/libmedia/include/media/mediarecorder.h b/media/libmedia/include/media/mediarecorder.h
index fbcdb28..84c92f6 100644
--- a/media/libmedia/include/media/mediarecorder.h
+++ b/media/libmedia/include/media/mediarecorder.h
@@ -270,6 +270,7 @@
status_t setPreferredMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) const;
+ status_t getRtpDataUsage(uint64_t *bytes);
private:
void doCleanUp();
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index d9d1f25..e3cd9d8 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -913,4 +913,14 @@
return mMediaRecorder->getPortId(portId);
}
+status_t MediaRecorder::getRtpDataUsage(uint64_t *bytes)
+{
+ ALOGV("getRtpDataUsage");
+
+ if (mMediaRecorder == NULL) {
+ ALOGE("media recorder is not initialized yet");
+ return INVALID_OPERATION;
+ }
+ return mMediaRecorder->getRtpDataUsage(bytes);
+}
} // namespace android
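The new getRtpDataUsage() call threads straight from the client proxy through MediaRecorderClient and StagefrightRecorder down to the writer. A minimal caller-side sketch, assuming <media/mediarecorder.h> is available, a LOG_TAG is defined, and an already prepared recorder instance exists:

// Hypothetical helper: log the accumulated RTP byte count of a prepared recorder.
static void logRtpDataUsage(const android::sp<android::MediaRecorder>& recorder) {
    uint64_t rtpBytes = 0;
    if (recorder != nullptr && recorder->getRtpDataUsage(&rtpBytes) == android::NO_ERROR) {
        ALOGD("RTP data usage so far: %llu bytes", (unsigned long long)rtpBytes);
    }
}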
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 9b1974b..9f16a22 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -28,6 +28,7 @@
#include <binder/IServiceManager.h>
#include <binder/MemoryHeapBase.h>
#include <binder/MemoryBase.h>
+#include <camera/CameraUtils.h>
#include <codec2/hidl/client.h>
#include <cutils/atomic.h>
#include <cutils/properties.h> // for property_get
@@ -423,30 +424,35 @@
sp<IServiceManager> sm = defaultServiceManager();
- // WORKAROUND: We don't know if camera exists here and getService might block for 5 seconds.
- // Use checkService for camera if we don't know it exists.
- static std::atomic<bool> sCameraChecked(false); // once true never becomes false.
- static std::atomic<bool> sCameraVerified(false); // once true never becomes false.
- sp<IBinder> binder = (sCameraVerified || !sCameraChecked)
- ? sm->getService(String16("media.camera")) : sm->checkService(String16("media.camera"));
- // If the device does not have a camera, do not create a death listener for it.
- if (binder != NULL) {
- sCameraVerified = true;
- mDeathNotifiers.emplace_back(
- binder, [l = wp<IMediaRecorderClient>(listener)](){
- sp<IMediaRecorderClient> listener = l.promote();
- if (listener) {
- ALOGV("media.camera service died. "
- "Sending death notification.");
- listener->notify(
- MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED,
- MediaPlayerService::CAMERA_PROCESS_DEATH);
- } else {
- ALOGW("media.camera service died without a death handler.");
- }
- });
+ static const bool sCameraDisabled = CameraUtils::isCameraServiceDisabled();
+
+ if (!sCameraDisabled) {
+ // WORKAROUND: We don't know if camera exists here and getService might block for 5 seconds.
+ // Use checkService for camera if we don't know it exists.
+ static std::atomic<bool> sCameraChecked(false); // once true never becomes false.
+ static std::atomic<bool> sCameraVerified(false); // once true never becomes false.
+
+ sp<IBinder> binder = (sCameraVerified || !sCameraChecked)
+ ? sm->getService(String16("media.camera")) : sm->checkService(String16("media.camera"));
+ // If the device does not have a camera, do not create a death listener for it.
+ if (binder != NULL) {
+ sCameraVerified = true;
+ mDeathNotifiers.emplace_back(
+ binder, [l = wp<IMediaRecorderClient>(listener)](){
+ sp<IMediaRecorderClient> listener = l.promote();
+ if (listener) {
+ ALOGV("media.camera service died. "
+ "Sending death notification.");
+ listener->notify(
+ MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED,
+ MediaPlayerService::CAMERA_PROCESS_DEATH);
+ } else {
+ ALOGW("media.camera service died without a death handler.");
+ }
+ });
+ }
+ sCameraChecked = true;
}
- sCameraChecked = true;
{
using ::android::hidl::base::V1_0::IBase;
@@ -585,4 +591,13 @@
}
return NO_INIT;
}
+
+status_t MediaRecorderClient::getRtpDataUsage(uint64_t *bytes) {
+ ALOGV("getRtpDataUsage");
+ Mutex::Autolock lock(mLock);
+ if (mRecorder != NULL) {
+ return mRecorder->getRtpDataUsage(bytes);
+ }
+ return NO_INIT;
+}
}; // namespace android
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index 12257e5..e041855 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -86,6 +86,7 @@
virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
virtual status_t setPreferredMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) override;
+ virtual status_t getRtpDataUsage(uint64_t *bytes);
private:
friend class MediaPlayerService; // for accessing private constructor
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index b2f6407..ecbdf61 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -2568,6 +2568,14 @@
return NO_INIT;
}
+status_t StagefrightRecorder::getRtpDataUsage(uint64_t *bytes) {
+ if (mWriter != 0) {
+ *bytes = mWriter->getAccumulativeBytes();
+ return OK;
+ }
+ return NO_INIT;
+}
+
status_t StagefrightRecorder::dump(
int fd, const Vector<String16>& args) const {
ALOGV("dump");
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 0362edd..4bba869 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -82,6 +82,7 @@
virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
virtual status_t setPreferredMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) const override;
+ virtual status_t getRtpDataUsage(uint64_t *bytes);
private:
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 47362ef..389249e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -2858,23 +2858,24 @@
in.writeInt32(payloadType);
switch (payloadType) {
- case NuPlayer::RTPSource::RTCP_TSFB: // RTCP TSFB
- case NuPlayer::RTPSource::RTCP_PSFB: // RTCP PSFB
- case NuPlayer::RTPSource::RTP_AUTODOWN:
+ case ARTPSource::RTCP_TSFB: // RTCP TSFB
+ case ARTPSource::RTCP_PSFB: // RTCP PSFB
+ case ARTPSource::RTP_AUTODOWN:
{
int32_t feedbackType, id;
CHECK(msg->findInt32("feedback-type", &feedbackType));
CHECK(msg->findInt32("sender", &id));
in.writeInt32(feedbackType);
in.writeInt32(id);
- if (payloadType == NuPlayer::RTPSource::RTCP_TSFB) {
+ if (payloadType == ARTPSource::RTCP_TSFB) {
int32_t bitrate;
CHECK(msg->findInt32("bit-rate", &bitrate));
in.writeInt32(bitrate);
}
break;
}
- case NuPlayer::RTPSource::RTP_QUALITY:
+ case ARTPSource::RTP_QUALITY:
+ case ARTPSource::RTP_QUALITY_EMC:
{
int32_t feedbackType, bitrate;
int32_t highestSeqNum, baseSeqNum, prevExpected;
@@ -2895,7 +2896,7 @@
in.writeInt32(prevNumBufRecv);
break;
}
- case NuPlayer::RTPSource::RTP_CVO:
+ case ARTPSource::RTP_CVO:
{
int32_t cvo;
CHECK(msg->findInt32("cvo", &cvo));
diff --git a/media/libmediaplayerservice/nuplayer/RTPSource.cpp b/media/libmediaplayerservice/nuplayer/RTPSource.cpp
index b1901e8..b43df38 100644
--- a/media/libmediaplayerservice/nuplayer/RTPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTPSource.cpp
@@ -45,8 +45,18 @@
mRTPConn(new ARTPConnection(ARTPConnection::kViLTEConnection)),
mEOSTimeoutAudio(0),
mEOSTimeoutVideo(0),
- mLastCVOUpdated(-1) {
- ALOGD("RTPSource initialized with rtpParams=%s", rtpParams.string());
+ mFirstAccessUnit(true),
+ mAllTracksHaveTime(false),
+ mNTPAnchorUs(-1),
+ mMediaAnchorUs(-1),
+ mLastMediaTimeUs(-1),
+ mNumAccessUnitsReceived(0),
+ mLastCVOUpdated(-1),
+ mReceivedFirstRTCPPacket(false),
+ mReceivedFirstRTPPacket(false),
+ mPausing(false),
+ mPauseGeneration(0) {
+ ALOGD("RTPSource initialized with rtpParams=%s", rtpParams.string());
}
NuPlayer::RTPSource::~RTPSource() {
@@ -289,7 +299,7 @@
if ((*accessUnit) != NULL && (*accessUnit)->meta()->findInt32("cvo", &cvo) &&
cvo != mLastCVOUpdated) {
sp<AMessage> msg = new AMessage();
- msg->setInt32("payload-type", NuPlayer::RTPSource::RTP_CVO);
+ msg->setInt32("payload-type", ARTPSource::RTP_CVO);
msg->setInt32("cvo", cvo);
sp<AMessage> notify = dupNotify();
diff --git a/media/libmediaplayerservice/nuplayer/RTPSource.h b/media/libmediaplayerservice/nuplayer/RTPSource.h
index fb2d3b9..3b4f9e9 100644
--- a/media/libmediaplayerservice/nuplayer/RTPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTPSource.h
@@ -33,6 +33,7 @@
#include "AnotherPacketSource.h"
#include "APacketSource.h"
#include "ARTPConnection.h"
+#include "ARTPSource.h"
#include "ASessionDescription.h"
#include "NuPlayerSource.h"
@@ -51,16 +52,6 @@
const sp<AMessage> ¬ify,
const String8& rtpParams);
- enum {
- RTP_FIRST_PACKET = 100,
- RTCP_FIRST_PACKET = 101,
- RTP_QUALITY = 102,
- RTCP_TSFB = 205,
- RTCP_PSFB = 206,
- RTP_CVO = 300,
- RTP_AUTODOWN = 400,
- };
-
virtual status_t getBufferingSettings(
BufferingSettings* buffering /* nonnull */) override;
virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 8f1da0d..71a4ad8 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -6860,6 +6860,7 @@
ALOGV("onAllocateComponent");
CHECK(mCodec->mOMXNode == NULL);
+ mCodec->mFatalError = false;
sp<AMessage> notify = new AMessage(kWhatOMXMessageList, mCodec);
notify->setInt32("generation", mCodec->mNodeGeneration + 1);
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 0ed0de1..e228a9d 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -321,11 +321,24 @@
BufferQueue::createBufferQueue(&mProducer, &mConsumer);
mSurface = new Surface(mProducer, false /* controlledByApp */);
struct ConsumerListener : public BnConsumerListener {
- void onFrameAvailable(const BufferItem&) override {}
+ ConsumerListener(const sp<IGraphicBufferConsumer> &consumer) {
+ mConsumer = consumer;
+ }
+ void onFrameAvailable(const BufferItem&) override {
+ BufferItem buffer;
+ // consume buffer
+ sp<IGraphicBufferConsumer> consumer = mConsumer.promote();
+ if (consumer != nullptr && consumer->acquireBuffer(&buffer, 0) == NO_ERROR) {
+ consumer->releaseBuffer(buffer.mSlot, buffer.mFrameNumber,
+ EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, buffer.mFence);
+ }
+ }
+
+ wp<IGraphicBufferConsumer> mConsumer;
void onBuffersReleased() override {}
void onSidebandStreamChanged() override {}
};
- sp<ConsumerListener> listener{new ConsumerListener};
+ sp<ConsumerListener> listener{new ConsumerListener(mConsumer)};
mConsumer->consumerConnect(listener, false);
mConsumer->setConsumerName(String8{"MediaCodec.release"});
mConsumer->setConsumerUsageBits(usage);
@@ -1311,6 +1324,8 @@
// save msg for reset
mConfigureMsg = msg;
+ sp<AMessage> callback = mCallback;
+
status_t err;
std::vector<MediaResourceParcel> resources;
resources.push_back(MediaResource::CodecResource(mFlags & kFlagIsSecure, mIsVideo));
@@ -1335,7 +1350,18 @@
// the configure failure is due to wrong state.
ALOGE("configure failed with err 0x%08x, resetting...", err);
- reset();
+ status_t err2 = reset();
+ if (err2 != OK) {
+ ALOGE("retrying configure: failed to reset codec (%08x)", err2);
+ break;
+ }
+ if (callback != nullptr) {
+ err2 = setCallback(callback);
+ if (err2 != OK) {
+ ALOGE("retrying configure: failed to set callback (%08x)", err2);
+ break;
+ }
+ }
}
if (!isResourceError(err)) {
break;
@@ -1444,6 +1470,8 @@
status_t MediaCodec::start() {
sp<AMessage> msg = new AMessage(kWhatStart, this);
+ sp<AMessage> callback;
+
status_t err;
std::vector<MediaResourceParcel> resources;
resources.push_back(MediaResource::CodecResource(mFlags & kFlagIsSecure, mIsVideo));
@@ -1468,6 +1496,20 @@
ALOGE("retrying start: failed to configure codec");
break;
}
+ if (callback != nullptr) {
+ err = setCallback(callback);
+ if (err != OK) {
+ ALOGE("retrying start: failed to set callback");
+ break;
+ }
+ ALOGD("succeed to set callback for reclaim");
+ }
+ }
+
+ // Keep callback message after the first iteration if necessary.
+ if (i == 0 && mCallback != nullptr && mFlags & kFlagIsAsync) {
+ callback = mCallback;
+ ALOGD("keep callback message for reclaim");
}
sp<AMessage> response;
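The replacement listener keeps only a weak reference to the consumer and promotes it per frame, so the listener itself never extends the BufferQueue's lifetime. A small standalone sketch of that wp<>/promote() pattern with the utils RefBase machinery (Producer and Worker are illustrative names, not framework classes):

#include <utils/RefBase.h>
#include <utils/StrongPointer.h>

using android::RefBase;
using android::sp;
using android::wp;

struct Producer : public RefBase {
    void produce() { /* do work */ }
};

// Holds only a weak reference, so the Producer can be torn down independently.
struct Worker : public RefBase {
    explicit Worker(const sp<Producer>& p) : mProducer(p) {}
    void onEvent() {
        sp<Producer> producer = mProducer.promote();  // nullptr if already destroyed
        if (producer != nullptr) {
            producer->produce();
        }
    }
    wp<Producer> mProducer;
};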
diff --git a/media/libstagefright/include/media/stagefright/MediaWriter.h b/media/libstagefright/include/media/stagefright/MediaWriter.h
index 17b1abf..9f20185 100644
--- a/media/libstagefright/include/media/stagefright/MediaWriter.h
+++ b/media/libstagefright/include/media/stagefright/MediaWriter.h
@@ -58,6 +58,7 @@
virtual void updatePayloadType(int32_t /*payloadType*/) {}
virtual void updateSocketNetwork(int64_t /*socketNetwork*/) {}
virtual uint32_t getSequenceNum() { return 0; }
+ virtual uint64_t getAccumulativeBytes() { return 0; }
protected:
virtual ~MediaWriter() {}
diff --git a/media/libstagefright/rtsp/AAVCAssembler.cpp b/media/libstagefright/rtsp/AAVCAssembler.cpp
index 72a377d..2f93d5d 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AAVCAssembler.cpp
@@ -112,24 +112,25 @@
ARTPAssembler::AssemblyStatus AAVCAssembler::addNALUnit(
const sp<ARTPSource> &source) {
List<sp<ABuffer> > *queue = source->queue();
+ const uint32_t firstRTPTime = source->mFirstRtpTime;
if (queue->empty()) {
return NOT_ENOUGH_DATA;
}
sp<ABuffer> buffer = *queue->begin();
- uint32_t rtpTime;
- CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
buffer->meta()->setObject("source", source);
+ int64_t rtpTime = findRTPTime(firstRTPTime, buffer);
+
int64_t startTime = source->mFirstSysTime / 1000;
int64_t nowTime = ALooper::GetNowUs() / 1000;
int64_t playedTime = nowTime - startTime;
- int64_t playedTimeRtp =
- source->mFirstRtpTime + (((uint32_t)playedTime) * (source->mClockRate / 1000));
- const uint32_t jitterTime =
- (uint32_t)(source->mClockRate / ((float)1000 / (source->mJbTimeMs)));
- uint32_t expiredTimeInJb = rtpTime + jitterTime;
+
+ int64_t playedTimeRtp = source->mFirstRtpTime + playedTime * (int64_t)source->mClockRate / 1000;
+ const int64_t jitterTime = source->mJbTimeMs * (int64_t)source->mClockRate / 1000;
+
+ int64_t expiredTimeInJb = rtpTime + jitterTime;
bool isExpired = expiredTimeInJb <= (playedTimeRtp);
bool isTooLate200 = expiredTimeInJb < (playedTimeRtp - jitterTime);
bool isTooLate300 = expiredTimeInJb < (playedTimeRtp - (jitterTime * 3 / 2));
@@ -154,11 +155,11 @@
if (isTooLate300) {
ALOGW("buffer arrived after 300ms ... \t Diff in Jb=%lld \t Seq# %d",
- ((long long)playedTimeRtp) - expiredTimeInJb, buffer->int32Data());
+ (long long)(playedTimeRtp - expiredTimeInJb), buffer->int32Data());
printNowTimeUs(startTime, nowTime, playedTime);
printRTPTime(rtpTime, playedTimeRtp, expiredTimeInJb, isExpired);
- mNextExpectedSeqNo = pickProperSeq(queue, jitterTime, playedTimeRtp);
+ mNextExpectedSeqNo = pickProperSeq(queue, firstRTPTime, playedTimeRtp, jitterTime);
}
if (mNextExpectedSeqNoValid) {
@@ -564,14 +565,25 @@
msg->post();
}
-int32_t AAVCAssembler::pickProperSeq(const Queue *queue, uint32_t jit, int64_t play) {
+inline int64_t AAVCAssembler::findRTPTime(
+ const uint32_t& firstRTPTime, const sp<ABuffer>& buffer) {
+ /* rtpTime is kept as an int64_t on purpose: the 32-bit RTP timestamp can be near
+ UINT32_MAX, so adding, subtracting, or multiplying a 32-bit value here could overflow. */
+ int64_t rtpTime = 0;
+ CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+ // If firstRTPTime is above 2^31 and rtpTime is below 2^31, rtpTime has wrapped around.
+ int64_t overflowMask = (firstRTPTime & 0x80000000 & ~rtpTime) << 1;
+ return rtpTime | overflowMask;
+}
+
+int32_t AAVCAssembler::pickProperSeq(const Queue *queue,
+ uint32_t first, int64_t play, int64_t jit) {
sp<ABuffer> buffer = *(queue->begin());
- uint32_t rtpTime;
int32_t nextSeqNo = buffer->int32Data();
Queue::const_iterator it = queue->begin();
while (it != queue->end()) {
- CHECK((*it)->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+ int64_t rtpTime = findRTPTime(first, *it);
// if pkt in time exists, that should be the next pivot
if (rtpTime + jit >= play) {
nextSeqNo = (*it)->int32Data();
@@ -613,9 +625,9 @@
(long long)start, (long long)now, (long long)play);
}
-inline void AAVCAssembler::printRTPTime(uint32_t rtp, int64_t play, uint32_t exp, bool isExp) {
- ALOGD("rtp-time(JB)=%u, played-rtp-time(JB)=%lld, expired-rtp-time(JB)=%u isExpired=%d",
- rtp, (long long)play, exp, isExp);
+inline void AAVCAssembler::printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp) {
+ ALOGD("rtp-time(JB)=%lld, played-rtp-time(JB)=%lld, expired-rtp-time(JB)=%lld expired=%d",
+ (long long)rtp, (long long)play, (long long)exp, isExp);
}
ARTPAssembler::AssemblyStatus AAVCAssembler::assembleMore(
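findRTPTime() widens the 32-bit RTP timestamp to 64 bits and compensates for wrap-around relative to the stream's first timestamp. A self-contained sketch of the same unwrap trick, with two worked cases as assertions:

#include <cstdint>
#include <cassert>

// Same idea as findRTPTime(): if the stream started above 2^31 and the current
// 32-bit timestamp is below 2^31, treat the current value as wrapped and lift it
// by 2^32 so ordinary 64-bit arithmetic stays monotonic.
static int64_t unwrapRtpTime(uint32_t firstRtpTime, uint32_t rtpTime) {
    int64_t wide = rtpTime;
    int64_t overflowMask = ((int64_t)firstRtpTime & 0x80000000 & ~wide) << 1;
    return wide | overflowMask;
}

int main() {
    // Stream starts near UINT32_MAX; a later packet has already wrapped to a small value.
    assert(unwrapRtpTime(0xFFFFFF00u, 0x00000100u) == 0x100000100LL);
    // No wrap: the value passes through unchanged.
    assert(unwrapRtpTime(0x10000000u, 0x10000100u) == 0x10000100LL);
    return 0;
}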
diff --git a/media/libstagefright/rtsp/AAVCAssembler.h b/media/libstagefright/rtsp/AAVCAssembler.h
index 79fc7c2..9d71e2f 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.h
+++ b/media/libstagefright/rtsp/AAVCAssembler.h
@@ -63,12 +63,13 @@
void submitAccessUnit();
- int32_t pickProperSeq(const Queue *q, uint32_t jit, int64_t play);
+ inline int64_t findRTPTime(const uint32_t& firstRTPTime, const sp<ABuffer>& buffer);
+ int32_t pickProperSeq(const Queue *q, uint32_t first, int64_t play, int64_t jit);
bool recycleUnit(uint32_t start, uint32_t end, uint32_t connected,
size_t avail, float goodRatio);
int32_t deleteUnitUnderSeq(Queue *q, uint32_t seq);
void printNowTimeUs(int64_t start, int64_t now, int64_t play);
- void printRTPTime(uint32_t rtp, int64_t play, uint32_t exp, bool isExp);
+ void printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp);
DISALLOW_EVIL_CONSTRUCTORS(AAVCAssembler);
};
diff --git a/media/libstagefright/rtsp/AHEVCAssembler.cpp b/media/libstagefright/rtsp/AHEVCAssembler.cpp
index 148a0ba..553ea08 100644
--- a/media/libstagefright/rtsp/AHEVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AHEVCAssembler.cpp
@@ -122,6 +122,7 @@
ARTPAssembler::AssemblyStatus AHEVCAssembler::addNALUnit(
const sp<ARTPSource> &source) {
List<sp<ABuffer> > *queue = source->queue();
+ const uint32_t firstRTPTime = source->mFirstRtpTime;
if (queue->empty()) {
return NOT_ENOUGH_DATA;
@@ -129,15 +130,15 @@
sp<ABuffer> buffer = *queue->begin();
buffer->meta()->setObject("source", source);
- uint32_t rtpTime;
- CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+ int64_t rtpTime = findRTPTime(firstRTPTime, buffer);
+
int64_t startTime = source->mFirstSysTime / 1000;
int64_t nowTime = ALooper::GetNowUs() / 1000;
int64_t playedTime = nowTime - startTime;
- int64_t playedTimeRtp = source->mFirstRtpTime +
- (((uint32_t)playedTime) * (source->mClockRate / 1000));
- const uint32_t jitterTime = (uint32_t)(source->mClockRate / ((float)1000 / (source->mJbTimeMs)));
- uint32_t expiredTimeInJb = rtpTime + jitterTime;
+ int64_t playedTimeRtp = source->mFirstRtpTime + playedTime * (int64_t)source->mClockRate / 1000;
+ const int64_t jitterTime = source->mJbTimeMs * (int64_t)source->mClockRate / 1000;
+
+ int64_t expiredTimeInJb = rtpTime + jitterTime;
bool isExpired = expiredTimeInJb <= (playedTimeRtp);
bool isTooLate200 = expiredTimeInJb < (playedTimeRtp - jitterTime);
bool isTooLate300 = expiredTimeInJb < (playedTimeRtp - (jitterTime * 3 / 2));
@@ -162,11 +163,11 @@
if (isTooLate300) {
ALOGW("buffer arrived after 300ms ... \t Diff in Jb=%lld \t Seq# %d",
- ((long long)playedTimeRtp) - expiredTimeInJb, buffer->int32Data());
+ (long long)(playedTimeRtp - expiredTimeInJb), buffer->int32Data());
printNowTimeUs(startTime, nowTime, playedTime);
printRTPTime(rtpTime, playedTimeRtp, expiredTimeInJb, isExpired);
- mNextExpectedSeqNo = pickProperSeq(queue, jitterTime, playedTimeRtp);
+ mNextExpectedSeqNo = pickProperSeq(queue, firstRTPTime, playedTimeRtp, jitterTime);
}
if (mNextExpectedSeqNoValid) {
@@ -577,14 +578,25 @@
msg->post();
}
-int32_t AHEVCAssembler::pickProperSeq(const Queue *queue, uint32_t jit, int64_t play) {
+inline int64_t AHEVCAssembler::findRTPTime(
+ const uint32_t& firstRTPTime, const sp<ABuffer>& buffer) {
+ /* rtpTime is kept as an int64_t on purpose: the 32-bit RTP timestamp can be near
+ UINT32_MAX, so adding, subtracting, or multiplying a 32-bit value here could overflow. */
+ int64_t rtpTime = 0;
+ CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+ // If firstRTPTime is above 2^31 and rtpTime is below 2^31, rtpTime has wrapped around.
+ int64_t overflowMask = (firstRTPTime & 0x80000000 & ~rtpTime) << 1;
+ return rtpTime | overflowMask;
+}
+
+int32_t AHEVCAssembler::pickProperSeq(const Queue *queue,
+ uint32_t first, int64_t play, int64_t jit) {
sp<ABuffer> buffer = *(queue->begin());
- uint32_t rtpTime;
int32_t nextSeqNo = buffer->int32Data();
Queue::const_iterator it = queue->begin();
while (it != queue->end()) {
- CHECK((*it)->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+ int64_t rtpTime = findRTPTime(first, *it);
// if pkt in time exists, that should be the next pivot
if (rtpTime + jit >= play) {
nextSeqNo = (*it)->int32Data();
@@ -626,12 +638,11 @@
(long long)start, (long long)now, (long long)play);
}
-inline void AHEVCAssembler::printRTPTime(uint32_t rtp, int64_t play, uint32_t exp, bool isExp) {
- ALOGD("rtp-time(JB)=%u, played-rtp-time(JB)=%lld, expired-rtp-time(JB)=%u isExpired=%d",
- rtp, (long long)play, exp, isExp);
+inline void AHEVCAssembler::printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp) {
+ ALOGD("rtp-time(JB)=%lld, played-rtp-time(JB)=%lld, expired-rtp-time(JB)=%lld expired=%d",
+ (long long)rtp, (long long)play, (long long)exp, isExp);
}
-
ARTPAssembler::AssemblyStatus AHEVCAssembler::assembleMore(
const sp<ARTPSource> &source) {
AssemblyStatus status = addNALUnit(source);
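Both assemblers now carry the jitter-buffer arithmetic in int64_t RTP ticks. A self-contained worked case, assuming a 90 kHz video clock and a 300 ms jitter buffer, shows how the expiry thresholds fall out (illustrative values only, not assembler code):

#include <cstdint>
#include <cstdio>

int main() {
    const int64_t clockRate = 90000;    // RTP clock ticks per second (video)
    const int64_t jbTimeMs = 300;       // jitter buffer depth in milliseconds
    const int64_t playedTimeMs = 1000;  // wall-clock time since the first packet

    // Same formulas as addNALUnit(): everything in RTP ticks, all 64-bit.
    int64_t firstRtpTime = 4000000000LL;                  // can sit near UINT32_MAX
    int64_t jitterTicks = jbTimeMs * clockRate / 1000;    // 27000 ticks
    int64_t playedTimeRtp = firstRtpTime + playedTimeMs * clockRate / 1000;
    int64_t rtpTime = firstRtpTime + 30000;               // queued buffer, ~667 ms behind
    int64_t expiredTimeInJb = rtpTime + jitterTicks;

    bool isExpired = expiredTimeInJb <= playedTimeRtp;
    bool isTooLate200 = expiredTimeInJb < playedTimeRtp - jitterTicks;
    bool isTooLate300 = expiredTimeInJb < playedTimeRtp - jitterTicks * 3 / 2;

    // Prints: jitterTicks=27000 expired=1 late200=1 late300=0
    printf("jitterTicks=%lld expired=%d late200=%d late300=%d\n",
           (long long)jitterTicks, isExpired, isTooLate200, isTooLate300);
    return 0;
}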
diff --git a/media/libstagefright/rtsp/AHEVCAssembler.h b/media/libstagefright/rtsp/AHEVCAssembler.h
index 16fc1c8..bf1cded 100644
--- a/media/libstagefright/rtsp/AHEVCAssembler.h
+++ b/media/libstagefright/rtsp/AHEVCAssembler.h
@@ -64,12 +64,13 @@
void submitAccessUnit();
- int32_t pickProperSeq(const Queue *queue, uint32_t jit, int64_t play);
- bool recycleUnit(uint32_t start, uint32_t end, uint32_t conneceted,
+ inline int64_t findRTPTime(const uint32_t& firstRTPTime, const sp<ABuffer>& buffer);
+ int32_t pickProperSeq(const Queue *q, uint32_t first, int64_t play, int64_t jit);
+ bool recycleUnit(uint32_t start, uint32_t end, uint32_t connected,
size_t avail, float goodRatio);
int32_t deleteUnitUnderSeq(Queue *queue, uint32_t seq);
void printNowTimeUs(int64_t start, int64_t now, int64_t play);
- void printRTPTime(uint32_t rtp, int64_t play, uint32_t exp, bool isExp);
+ void printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp);
DISALLOW_EVIL_CONSTRUCTORS(AHEVCAssembler);
};
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index 97a9bbb..61c06d1 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -464,6 +464,22 @@
ALOGD("Send FIR immediately for lost Packets");
send(&*it, buffer);
}
+
+ buffer->setRange(0, 0);
+ it->mSources.valueAt(i)->addTMMBR(buffer, mTargetBitrate);
+ mTargetBitrate = -1;
+ if (buffer->size() > 0) {
+ ALOGV("Sending TMMBR...");
+ ssize_t n = send(&*it, buffer);
+
+ if (n != (ssize_t)buffer->size()) {
+ ALOGW("failed to send RTCP TMMBR (%s).",
+ n >= 0 ? "connection gone" : strerror(errno));
+
+ it = mStreams.erase(it);
+ continue;
+ }
+ }
}
++it;
@@ -509,16 +525,14 @@
ssize_t n = send(s, buffer);
- if (n <= 0) {
+ if (n != (ssize_t)buffer->size()) {
ALOGW("failed to send RTCP receiver report (%s).",
- n == 0 ? "connection gone" : strerror(errno));
+ n >= 0 ? "connection gone" : strerror(errno));
it = mStreams.erase(it);
continue;
}
- CHECK_EQ(n, (ssize_t)buffer->size());
-
mLastReceiverReportTimeUs = nowUs;
}
@@ -862,6 +876,12 @@
sp<ARTPSource> source = findSource(s, id);
+ // Report final statistics to be used for RTP data usage.
+ int64_t nowUs = ALooper::GetNowUs();
+ int32_t timeDiff = (nowUs - mLastBitrateReportTimeUs) / 1000000ll;
+ int32_t bitrate = mCumulativeBytes * 8 / timeDiff;
+ source->notifyPktInfo(bitrate, true /* isRegular */);
+
source->byeReceived();
return OK;
@@ -1079,6 +1099,28 @@
mCumulativeBytes = 0;
mLastBitrateReportTimeUs = nowUs;
}
+ else if (mLastEarlyNotifyTimeUs + 100000ll <= nowUs) {
+ int32_t timeDiff = (nowUs - mLastBitrateReportTimeUs) / 1000000ll;
+ int32_t bitrate = mCumulativeBytes * 8 / timeDiff;
+ mLastEarlyNotifyTimeUs = nowUs;
+
+ List<StreamInfo>::iterator it = mStreams.begin();
+ while (it != mStreams.end()) {
+ StreamInfo *s = &*it;
+ if (s->mIsInjected) {
+ ++it;
+ continue;
+ }
+ for (size_t i = 0; i < s->mSources.size(); ++i) {
+ sp<ARTPSource> source = s->mSources.valueAt(i);
+ if (source->isNeedToEarlyNotify()) {
+ source->notifyPktInfo(bitrate, false /* isRegular */);
+ mLastEarlyNotifyTimeUs = nowUs + (1000000ll * 3600 * 24); // after 1 day
+ }
+ }
+ ++it;
+ }
+ }
else if (mLastBitrateReportTimeUs + 1000000ll <= nowUs) {
int32_t timeDiff = (nowUs - mLastBitrateReportTimeUs) / 1000000ll;
int32_t bitrate = mCumulativeBytes * 8 / timeDiff;
@@ -1101,31 +1143,15 @@
}
buffer->setRange(0, 0);
-
for (size_t i = 0; i < s->mSources.size(); ++i) {
sp<ARTPSource> source = s->mSources.valueAt(i);
- source->notifyPktInfo(bitrate, nowUs);
- source->addTMMBR(buffer, mTargetBitrate);
- }
- if (buffer->size() > 0) {
- ALOGV("Sending TMMBR...");
-
- ssize_t n = send(s, buffer);
-
- if (n <= 0) {
- ALOGW("failed to send RTCP TMMBR (%s).",
- n == 0 ? "connection gone" : strerror(errno));
-
- it = mStreams.erase(it);
- continue;
- }
-
- CHECK_EQ(n, (ssize_t)buffer->size());
+ source->notifyPktInfo(bitrate, true /* isRegular */);
}
++it;
}
mCumulativeBytes = 0;
mLastBitrateReportTimeUs = nowUs;
+ mLastEarlyNotifyTimeUs = nowUs;
}
}
void ARTPConnection::onInjectPacket(const sp<AMessage> &msg) {
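The early-notify path reuses the same bitrate estimate as the regular one-second report: cumulative bytes times eight over elapsed seconds. A small sketch of that computation follows; the sub-second guard is an added safety check for illustration, not part of the patch:

#include <cstdint>

// Bits per second from a byte counter and two timestamps in microseconds.
// Returns 0 if less than one full second has elapsed, to avoid dividing by zero.
static int32_t estimateBitrate(int64_t cumulativeBytes, int64_t lastReportUs, int64_t nowUs) {
    int64_t timeDiffSec = (nowUs - lastReportUs) / 1000000LL;
    if (timeDiffSec <= 0) {
        return 0;
    }
    return (int32_t)(cumulativeBytes * 8 / timeDiffSec);
}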
diff --git a/media/libstagefright/rtsp/ARTPConnection.h b/media/libstagefright/rtsp/ARTPConnection.h
index 7c8218f..a37ac0e 100644
--- a/media/libstagefright/rtsp/ARTPConnection.h
+++ b/media/libstagefright/rtsp/ARTPConnection.h
@@ -84,6 +84,7 @@
bool mPollEventPending;
int64_t mLastReceiverReportTimeUs;
int64_t mLastBitrateReportTimeUs;
+ int64_t mLastEarlyNotifyTimeUs;
int32_t mSelfID;
int32_t mTargetBitrate;
diff --git a/media/libstagefright/rtsp/ARTPSource.cpp b/media/libstagefright/rtsp/ARTPSource.cpp
index c611f6f..3fdf8e4 100644
--- a/media/libstagefright/rtsp/ARTPSource.cpp
+++ b/media/libstagefright/rtsp/ARTPSource.cpp
@@ -34,6 +34,8 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <strings.h>
+
namespace android {
static uint32_t kSourceID = 0xdeadbeef;
@@ -380,21 +382,24 @@
data[14] = (mID >> 8) & 0xff;
data[15] = mID & 0xff;
- int32_t exp, mantissa;
+ // Find the first bit '1' from left & right side of the value.
+ int32_t leftEnd = 31 - __builtin_clz(targetBitrate);
+ int32_t rightEnd = ffs(targetBitrate) - 1;
- // Round off to the nearest 2^4th
- ALOGI("UE -> Op Req Rx bitrate : %d ", targetBitrate & 0xfffffff0);
- for (exp=4 ; exp < 32 ; exp++)
- if (((targetBitrate >> exp) & 0x01) != 0)
- break;
- mantissa = targetBitrate >> exp;
+ // The mantissa has only 17 bits of space per the RTCP specification.
+ if ((leftEnd - rightEnd) > 16) {
+ rightEnd = leftEnd - 16;
+ }
+ int32_t mantissa = targetBitrate >> rightEnd;
- data[16] = ((exp << 2) & 0xfc) | ((mantissa & 0x18000) >> 15);
- data[17] = (mantissa & 0x07f80) >> 7;
- data[18] = (mantissa & 0x0007f) << 1;
+ data[16] = ((rightEnd << 2) & 0xfc) | ((mantissa & 0x18000) >> 15);
+ data[17] = (mantissa & 0x07f80) >> 7;
+ data[18] = (mantissa & 0x0007f) << 1;
data[19] = 40; // 40 bytes overhead;
buffer->setRange(buffer->offset(), buffer->size() + (data[3] + 1) * sizeof(int32_t));
+
+ ALOGI("UE -> Op Req Rx bitrate : %d ", mantissa << rightEnd);
}
int ARTPSource::addNACK(const sp<ABuffer> &buffer) {
@@ -512,10 +517,22 @@
mIssueFIRRequests = enable;
}
-void ARTPSource::notifyPktInfo(int32_t bitrate, int64_t /*time*/) {
+bool ARTPSource::isNeedToEarlyNotify() {
+ uint32_t expected = mHighestSeqNumber - mBaseSeqNumber + 1;
+ int32_t intervalExpectedInNow = expected - mPrevExpected;
+ int32_t intervalReceivedInNow = mNumBuffersReceived - mPrevNumBuffersReceived;
+
+ if (intervalExpectedInNow - intervalReceivedInNow > 5)
+ return true;
+ return false;
+}
+
+void ARTPSource::notifyPktInfo(int32_t bitrate, bool isRegular) {
+ int32_t payloadType = isRegular ? RTP_QUALITY : RTP_QUALITY_EMC;
+
sp<AMessage> notify = mNotify->dup();
notify->setInt32("rtcp-event", 1);
- notify->setInt32("payload-type", 102);
+ notify->setInt32("payload-type", payloadType);
notify->setInt32("feedback-type", 0);
// sending target bitrate up to application to share rtp quality.
notify->setInt32("bit-rate", bitrate);
@@ -526,9 +543,11 @@
notify->setInt32("prev-num-buf-recv", mPrevNumBuffersReceived);
notify->post();
- uint32_t expected = mHighestSeqNumber - mBaseSeqNumber + 1;
- mPrevExpected = expected;
- mPrevNumBuffersReceived = mNumBuffersReceived;
+ if (isRegular) {
+ uint32_t expected = mHighestSeqNumber - mBaseSeqNumber + 1;
+ mPrevExpected = expected;
+ mPrevNumBuffersReceived = mNumBuffersReceived;
+ }
}
void ARTPSource::onIssueFIRByAssembler() {
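The TMMBR/TMMBN payload carries the bitrate as a 17-bit mantissa scaled by a power-of-two exponent (RFC 5104), and the rewritten code derives both ends with __builtin_clz() and ffs() rather than the old fixed rounding loop. A standalone sketch of that encoding with one worked value:

#include <cstdint>
#include <cstdio>
#include <strings.h>  // ffs()

// Split a positive bitrate into mantissa * 2^exp with the mantissa limited to
// 17 bits, mirroring the addTMMBR()/addTMMBN() logic above.
static void encodeTmmbrBitrate(int32_t bitrate, int32_t *exp, int32_t *mantissa) {
    int32_t leftEnd = 31 - __builtin_clz(bitrate);  // highest set bit
    int32_t rightEnd = ffs(bitrate) - 1;            // lowest set bit
    if (leftEnd - rightEnd > 16) {                  // mantissa is only 17 bits wide
        rightEnd = leftEnd - 16;
    }
    *exp = rightEnd;
    *mantissa = bitrate >> rightEnd;
}

int main() {
    int32_t exp = 0, mantissa = 0;
    encodeTmmbrBitrate(192000, &exp, &mantissa);
    // 192000 = 0b101110111000000000 -> exp=9, mantissa=375, and 375 << 9 == 192000
    printf("exp=%d mantissa=%d reconstructed=%d\n", exp, mantissa, mantissa << exp);
    return 0;
}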
diff --git a/media/libstagefright/rtsp/ARTPSource.h b/media/libstagefright/rtsp/ARTPSource.h
index ea683a0..c51fd8a 100644
--- a/media/libstagefright/rtsp/ARTPSource.h
+++ b/media/libstagefright/rtsp/ARTPSource.h
@@ -40,6 +40,17 @@
const sp<ASessionDescription> &sessionDesc, size_t index,
const sp<AMessage> ¬ify);
+ enum {
+ RTP_FIRST_PACKET = 100,
+ RTCP_FIRST_PACKET = 101,
+ RTP_QUALITY = 102,
+ RTP_QUALITY_EMC = 103,
+ RTCP_TSFB = 205,
+ RTCP_PSFB = 206,
+ RTP_CVO = 300,
+ RTP_AUTODOWN = 400,
+ };
+
void processRTPPacket(const sp<ABuffer> &buffer);
void timeUpdate(uint32_t rtpTime, uint64_t ntpTime);
void byeReceived();
@@ -55,7 +66,8 @@
void setSelfID(const uint32_t selfID);
void setJbTime(const uint32_t jbTimeMs);
void setPeriodicFIR(bool enable);
- void notifyPktInfo(int32_t bitrate, int64_t time);
+ bool isNeedToEarlyNotify();
+ void notifyPktInfo(int32_t bitrate, bool isRegular);
// FIR needs to be sent by missing packet or broken video image.
void onIssueFIRByAssembler();
diff --git a/media/libstagefright/rtsp/ARTPWriter.cpp b/media/libstagefright/rtsp/ARTPWriter.cpp
index 76afb04..ec70952 100644
--- a/media/libstagefright/rtsp/ARTPWriter.cpp
+++ b/media/libstagefright/rtsp/ARTPWriter.cpp
@@ -20,8 +20,6 @@
#include "ARTPWriter.h"
-#include <fcntl.h>
-
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -32,6 +30,9 @@
#include <media/stagefright/MetaData.h>
#include <utils/ByteOrder.h>
+#include <fcntl.h>
+#include <strings.h>
+
#define PT 97
#define PT_STR "97"
@@ -46,10 +47,12 @@
#define H265_NALU_SPS 0x21
#define H265_NALU_PPS 0x22
-#define LINK_HEADER_SIZE 14
-#define IP_HEADER_SIZE 20
+#define IPV4_HEADER_SIZE 20
+#define IPV6_HEADER_SIZE 40
#define UDP_HEADER_SIZE 8
-#define TCPIP_HEADER_SIZE (LINK_HEADER_SIZE + IP_HEADER_SIZE + UDP_HEADER_SIZE)
+#define TCPIPV4_HEADER_SIZE (IPV4_HEADER_SIZE + UDP_HEADER_SIZE)
+#define TCPIPV6_HEADER_SIZE (IPV6_HEADER_SIZE + UDP_HEADER_SIZE)
+#define TCPIP_HEADER_SIZE TCPIPV4_HEADER_SIZE
#define RTP_HEADER_SIZE 12
#define RTP_HEADER_EXT_SIZE 8
#define RTP_FU_HEADER_SIZE 2
@@ -62,6 +65,9 @@
static const size_t kMaxPacketSize = 1280;
static char kCNAME[255] = "someone@somewhere";
+static const size_t kTrafficRecorderMaxEntries = 128;
+static const size_t kTrafficRecorderMaxTimeSpanMs = 2000;
+
static int UniformRand(int limit) {
return ((double)rand() * limit) / RAND_MAX;
}
@@ -71,7 +77,8 @@
mFd(dup(fd)),
mLooper(new ALooper),
mReflector(new AHandlerReflector<ARTPWriter>(this)),
- mTrafficRec(new TrafficRecorder<uint32_t, size_t>(128)) {
+ mTrafficRec(new TrafficRecorder<uint32_t /* Time */, Bytes>(
+ kTrafficRecorderMaxEntries, kTrafficRecorderMaxTimeSpanMs)) {
CHECK_GE(fd, 0);
mIsIPv6 = false;
@@ -122,7 +129,8 @@
mFd(dup(fd)),
mLooper(new ALooper),
mReflector(new AHandlerReflector<ARTPWriter>(this)),
- mTrafficRec(new TrafficRecorder<uint32_t, size_t>(128)) {
+ mTrafficRec(new TrafficRecorder<uint32_t /* Time */, Bytes>(
+ kTrafficRecorderMaxEntries, kTrafficRecorderMaxTimeSpanMs)) {
CHECK_GE(fd, 0);
mIsIPv6 = false;
@@ -135,7 +143,8 @@
mSPSBuf = NULL;
mPPSBuf = NULL;
- mSeqNo = seqNo;
+ initState();
+ mSeqNo = seqNo; // Must use the explicitly provided sequence number to keep RTP continuity
#if LOG_TO_FILES
mRTPFd = open(
@@ -186,6 +195,29 @@
mFd = -1;
}
+void ARTPWriter::initState() {
+ if (mSourceID == 0)
+ mSourceID = rand();
+ mPayloadType = 0;
+ if (mSeqNo == 0)
+ mSeqNo = UniformRand(65536);
+ mRTPTimeBase = 0;
+ mNumRTPSent = 0;
+ mNumRTPOctetsSent = 0;
+ mLastRTPTime = 0;
+ mLastNTPTime = 0;
+
+ mOpponentID = 0;
+ mBitrate = 192000;
+
+ mNumSRsSent = 0;
+ mRTPCVOExtMap = -1;
+ mRTPCVODegrees = 0;
+ mRTPSockNetwork = 0;
+
+ mMode = INVALID;
+}
+
status_t ARTPWriter::addSource(const sp<MediaSource> &source) {
mSource = source;
return OK;
@@ -203,21 +235,7 @@
}
mFlags &= ~kFlagEOS;
- if (mSourceID == 0)
- mSourceID = rand();
- if (mSeqNo == 0)
- mSeqNo = UniformRand(65536);
- mRTPTimeBase = 0;
- mNumRTPSent = 0;
- mNumRTPOctetsSent = 0;
- mLastRTPTime = 0;
- mLastNTPTime = 0;
- mOpponentID = 0;
- mBitrate = 192000;
- mNumSRsSent = 0;
- mRTPCVOExtMap = -1;
- mRTPCVODegrees = 0;
- mRTPSockNetwork = 0;
+ initState();
const char *mime;
CHECK(mSource->getFormat()->findCString(kKeyMIMEType, &mime));
@@ -246,7 +264,6 @@
if (params->findInt64(kKeySocketNetwork, &sockNetwork))
updateSocketNetwork(sockNetwork);
- mMode = INVALID;
if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
mMode = H264;
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
@@ -600,7 +617,8 @@
ALOGW("packets can not be sent. ret=%d, buf=%d", (int)n, (int)buffer->size());
} else {
// Record current traffic & Print bits while last 1sec (1000ms)
- mTrafficRec->writeBytes(buffer->size());
+ mTrafficRec->writeBytes(buffer->size() +
+ (mIsIPv6 ? TCPIPV6_HEADER_SIZE : TCPIPV4_HEADER_SIZE));
mTrafficRec->printAccuBitsForLastPeriod(1000, 1000);
}
@@ -729,21 +747,24 @@
data[14] = (mOpponentID >> 8) & 0xff;
data[15] = mOpponentID & 0xff;
- int32_t exp, mantissa;
+ // Find the first bit '1' from left & right side of the value.
+ int32_t leftEnd = 31 - __builtin_clz(mBitrate);
+ int32_t rightEnd = ffs(mBitrate) - 1;
- // Round off to the nearest 2^4th
- ALOGI("UE -> Op Noti Tx bitrate : %d ", mBitrate & 0xfffffff0);
- for (exp=4 ; exp < 32 ; exp++)
- if (((mBitrate >> exp) & 0x01) != 0)
- break;
- mantissa = mBitrate >> exp;
+ // The mantissa has only 17 bits of space per the RTCP specification.
+ if ((leftEnd - rightEnd) > 16) {
+ rightEnd = leftEnd - 16;
+ }
+ int32_t mantissa = mBitrate >> rightEnd;
- data[16] = ((exp << 2) & 0xfc) | ((mantissa & 0x18000) >> 15);
- data[17] = (mantissa & 0x07f80) >> 7;
- data[18] = (mantissa & 0x0007f) << 1;
+ data[16] = ((rightEnd << 2) & 0xfc) | ((mantissa & 0x18000) >> 15);
+ data[17] = (mantissa & 0x07f80) >> 7;
+ data[18] = (mantissa & 0x0007f) << 1;
data[19] = 40; // 40 bytes overhead;
buffer->setRange(buffer->offset(), buffer->size() + 20);
+
+ ALOGI("UE -> Op Noti Tx bitrate : %d ", mantissa << rightEnd);
}
// static
@@ -1362,6 +1383,10 @@
return mSeqNo;
}
+uint64_t ARTPWriter::getAccumulativeBytes() {
+ return mTrafficRec->readBytesForTotal();
+}
+
static size_t getFrameSize(bool isWide, unsigned FT) {
static const size_t kFrameSizeNB[8] = {
95, 103, 118, 134, 148, 159, 204, 244
diff --git a/media/libstagefright/rtsp/ARTPWriter.h b/media/libstagefright/rtsp/ARTPWriter.h
index 6f25a66..28d6ec5 100644
--- a/media/libstagefright/rtsp/ARTPWriter.h
+++ b/media/libstagefright/rtsp/ARTPWriter.h
@@ -53,6 +53,7 @@
void updateSocketDscp(int32_t dscp);
void updateSocketNetwork(int64_t socketNetwork);
uint32_t getSequenceNum();
+ virtual uint64_t getAccumulativeBytes() override;
virtual void onMessageReceived(const sp<AMessage> &msg);
virtual void setTMMBNInfo(uint32_t opponentID, uint32_t bitrate);
@@ -118,7 +119,8 @@
uint32_t mOpponentID;
uint32_t mBitrate;
- sp<TrafficRecorder<uint32_t, size_t> > mTrafficRec;
+ typedef uint64_t Bytes;
+ sp<TrafficRecorder<uint32_t /* Time */, Bytes> > mTrafficRec;
int32_t mNumSRsSent;
int32_t mRTPCVOExtMap;
@@ -135,6 +137,7 @@
static uint64_t GetNowNTP();
+ void initState();
void onRead(const sp<AMessage> &msg);
void onSendSR(const sp<AMessage> &msg);
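With the header-size macros split by IP version, writeBytes() now charges each packet its IP plus UDP overhead so getAccumulativeBytes() approximates on-the-wire usage rather than RTP payload alone. A quick sanity-check sketch using the same constants:

#include <cstddef>
#include <cstdio>

#define IPV4_HEADER_SIZE 20
#define IPV6_HEADER_SIZE 40
#define UDP_HEADER_SIZE 8

int main() {
    size_t rtpPacketSize = 1200;  // RTP header + payload as handed to sendto()
    bool isIPv6 = true;
    size_t onWire = rtpPacketSize + (isIPv6 ? IPV6_HEADER_SIZE : IPV4_HEADER_SIZE)
            + UDP_HEADER_SIZE;
    printf("accounted bytes per packet: %zu\n", onWire);  // 1248 for IPv6
    return 0;
}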
diff --git a/media/libstagefright/rtsp/TrafficRecorder.h b/media/libstagefright/rtsp/TrafficRecorder.h
index f8e7c03..8ba8f90 100644
--- a/media/libstagefright/rtsp/TrafficRecorder.h
+++ b/media/libstagefright/rtsp/TrafficRecorder.h
@@ -27,44 +27,49 @@
template <class Time, class Bytes>
class TrafficRecorder : public RefBase {
private:
+ constexpr static size_t kMinNumEntries = 4;
+ constexpr static size_t kMaxNumEntries = 1024;
+
size_t mSize;
size_t mSizeMask;
Time *mTimeArray = NULL;
Bytes *mBytesArray = NULL;
- size_t mHeadIdx = 0;
- size_t mTailIdx = 0;
+ size_t mHeadIdx;
+ size_t mTailIdx;
- Time mClock = 0;
- Time mLastTimeOfPrint = 0;
- Bytes mAccuBytesOfPrint = 0;
+ int mLastReadIdx;
+
+ const Time mRecordLimit;
+ Time mClock;
+ Time mLastTimeOfPrint;
+ Bytes mAccuBytes;
+
public:
- TrafficRecorder();
- TrafficRecorder(size_t size);
+ TrafficRecorder(size_t size, Time accuTimeLimit);
virtual ~TrafficRecorder();
void init();
-
void updateClock(Time now);
-
+ Bytes readBytesForTotal();
Bytes readBytesForLastPeriod(Time period);
void writeBytes(Bytes bytes);
-
void printAccuBitsForLastPeriod(Time period, Time unit);
};
template <class Time, class Bytes>
-TrafficRecorder<Time, Bytes>::TrafficRecorder() {
- TrafficRecorder(128);
-}
-
-template <class Time, class Bytes>
-TrafficRecorder<Time, Bytes>::TrafficRecorder(size_t size) {
- size_t exp;
- for (exp = 0; exp < 32; exp++) {
- if (size <= (1ul << exp)) {
- break;
- }
+TrafficRecorder<Time, Bytes>::TrafficRecorder(size_t size, Time recordLimit)
+ : mRecordLimit(recordLimit) {
+ if (size > kMaxNumEntries) {
+ LOG(VERBOSE) << "Limiting TrafficRecorder size to " << kMaxNumEntries;
+ size = kMaxNumEntries;
+ } else if (size < kMinNumEntries) {
+ LOG(VERBOSE) << "Limiting TrafficRecorder size to " << kMaxNumEntries;
+ size = kMinNumEntries;
}
+
+ size_t exp = ((sizeof(size_t) == 8) ?
+ 64 - __builtin_clzl(size - 1) :
+ 32 - __builtin_clz(size - 1));
mSize = (1ul << exp); // size = 2^exp
mSizeMask = mSize - 1;
@@ -84,9 +89,15 @@
template <class Time, class Bytes>
void TrafficRecorder<Time, Bytes>::init() {
mHeadIdx = 0;
- mTailIdx = 0;
- mTimeArray[0] = 0;
- mBytesArray[0] = 0;
+ mTailIdx = mSizeMask;
+ for (int i = 0 ; i < mSize ; i++) {
+ mTimeArray[i] = 0;
+ mBytesArray[i] = 0;
+ }
+ mClock = 0;
+ mLastReadIdx = 0;
+ mLastTimeOfPrint = 0;
+ mAccuBytes = 0;
}
template <class Time, class Bytes>
@@ -95,54 +106,71 @@
}
template <class Time, class Bytes>
-Bytes TrafficRecorder<Time, Bytes>::readBytesForLastPeriod(Time period) {
- Bytes bytes = 0;
+Bytes TrafficRecorder<Time, Bytes>::readBytesForTotal() {
+ return mAccuBytes;
+}
- size_t i = mTailIdx;
- while (i != mHeadIdx) {
- LOG(VERBOSE) << "READ " << i << " time " << mTimeArray[i] << " \t EndOfPeriod " << mClock - period;
+template <class Time, class Bytes>
+Bytes TrafficRecorder<Time, Bytes>::readBytesForLastPeriod(Time period) {
+ // Not enough data
+ if (period > mClock)
+ return 0;
+
+ Bytes bytes = 0;
+ int i = mHeadIdx;
+ while (i != mTailIdx) {
+ LOG(VERBOSE) << "READ " << i << " time " << mTimeArray[i]
+ << " \t EndOfPeriod " << mClock - period
+ << "\t\t Bytes:" << mBytesArray[i] << "\t\t Accu: " << bytes;
if (mTimeArray[i] < mClock - period) {
break;
}
bytes += mBytesArray[i];
- i = (i + mSize - 1) & mSizeMask;
+ i = (i - 1) & mSizeMask;
}
- mHeadIdx = i;
+ mLastReadIdx = (i + 1) & mSizeMask;
+
return bytes;
}
template <class Time, class Bytes>
void TrafficRecorder<Time, Bytes>::writeBytes(Bytes bytes) {
- size_t writeIdx;
- if (mClock == mTimeArray[mTailIdx]) {
- writeIdx = mTailIdx;
+ int writeIdx;
+ if (mClock == mTimeArray[mHeadIdx]) {
+ writeIdx = mHeadIdx;
mBytesArray[writeIdx] += bytes;
} else {
- writeIdx = (mTailIdx + 1) % mSize;
+ writeIdx = (mHeadIdx + 1) & mSizeMask;
mTimeArray[writeIdx] = mClock;
mBytesArray[writeIdx] = bytes;
}
LOG(VERBOSE) << "WRITE " << writeIdx << " time " << mClock;
- if (writeIdx == mHeadIdx) {
- LOG(WARNING) << "Traffic recorder size exceeded at " << mHeadIdx;
- mHeadIdx = (mHeadIdx + 1) & mSizeMask;
+ if (writeIdx == mTailIdx) {
+ mTailIdx = (mTailIdx + 1) & mSizeMask;
}
- mTailIdx = writeIdx;
- mAccuBytesOfPrint += bytes;
+ mHeadIdx = writeIdx;
+ mAccuBytes += bytes;
}
template <class Time, class Bytes>
void TrafficRecorder<Time, Bytes>::printAccuBitsForLastPeriod(Time period, Time unit) {
- Time duration = mClock - mLastTimeOfPrint;
- float numOfUnit = (float)duration / unit;
- if (duration > period) {
- ALOGD("Actual Tx period %.0f ms \t %.0f Bits/Unit",
- numOfUnit * 1000.f, mAccuBytesOfPrint * 8.f / numOfUnit);
- mLastTimeOfPrint = mClock;
- mAccuBytesOfPrint = 0;
- init();
+ Time timeSinceLastPrint = mClock - mLastTimeOfPrint;
+ if (timeSinceLastPrint < period)
+ return;
+
+ Bytes sum = readBytesForLastPeriod(period);
+ Time readPeriod = mClock - mTimeArray[mLastReadIdx];
+
+ float numOfUnit = (float)(readPeriod) / (unit + FLT_MIN);
+ ALOGD("Actual Tx period %.3f unit \t %.0f bytes (%.0f Kbits)/Unit",
+ numOfUnit, sum / numOfUnit, sum * 8.f / numOfUnit / 1000.f);
+ mLastTimeOfPrint = mClock;
+
+ if (mClock - mTimeArray[mTailIdx] < mRecordLimit) {
+ // The buffer is too small to cover a full mRecordLimit period.
+ ALOGW("Traffic recorder size is too small for mRecordLimit %d", mRecordLimit);
}
}
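TrafficRecorder still requires a power-of-two capacity so index wrap-around stays a cheap bitmask, and the constructor now clamps the requested size and rounds it up with a count-leading-zeros trick. A sketch of just that rounding step, assuming the size has already been clamped to at least 2 (as the kMinNumEntries clamp guarantees):

#include <cstddef>
#include <cstdio>

// Round a clamped size up to the next power of two, as in the TrafficRecorder
// constructor, so that (index & (size - 1)) can replace a modulo.
static size_t roundUpToPowerOfTwo(size_t size) {
    size_t exp = (sizeof(size_t) == 8) ? 64 - __builtin_clzl(size - 1)
                                       : 32 - __builtin_clz(size - 1);
    return (size_t)1 << exp;
}

int main() {
    printf("%zu %zu %zu\n",
           roundUpToPowerOfTwo(4),      // 4
           roundUpToPowerOfTwo(100),    // 128
           roundUpToPowerOfTwo(1024));  // 1024
    return 0;
}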
diff --git a/media/libstagefright/writer_fuzzers/README.md b/media/libstagefright/writer_fuzzers/README.md
index 0d21031..6f95ecc 100644
--- a/media/libstagefright/writer_fuzzers/README.md
+++ b/media/libstagefright/writer_fuzzers/README.md
@@ -29,7 +29,7 @@
| Parameter| Valid Values| Configured Value|
|------------- |-------------| ----- |
-| `mime` | 0. `audio/3gpp` 1. `audio/amr-wb` 2. `audio/vorbis` 3. `audio/opus` 4. `audio/mp4a-latm` 5. `video/avc` 6. `video/hevc` 7. `video/mp4v-es` 8. `video/3gpp` 9. `video/x-vnd.on2.vp8` 10. `video/x-vnd.on2.vp9` | All the bits of 2nd byte of data for first track and 11th byte of data for second track (if present) modulus 10 |
+| `mime` | 0. `audio/3gpp` 1. `audio/amr-wb` 2. `audio/vorbis` 3. `audio/opus` 4. `audio/mp4a-latm` 5. `audio/mpeg` 6. `audio/mpeg-L1` 7. `audio/mpeg-L2` 8. `audio/midi` 9. `audio/qcelp` 10. `audio/g711-alaw` 11. `audio/g711-mlaw` 12. `audio/flac` 13. `audio/aac-adts` 14. `audio/gsm` 15. `audio/ac3` 16. `audio/eac3` 17. `audio/eac3-joc` 18. `audio/ac4` 19. `audio/scrambled` 20. `audio/alac` 21. `audio/x-ms-wma` 22. `audio/x-adpcm-ms` 23. `audio/x-adpcm-dvi-ima` 24. `video/avc` 25. `video/hevc` 26. `video/mp4v-es` 27. `video/3gpp` 28. `video/x-vnd.on2.vp8` 29. `video/x-vnd.on2.vp9` 30. `video/av01` 31. `video/mpeg2` 32. `video/dolby-vision` 33. `video/scrambled` 34. `video/divx` 35. `video/divx3` 36. `video/xvid` 37. `video/x-motion-jpeg` 38. `text/3gpp-tt` 39. `application/x-subrip` 40. `text/vtt` 41. `text/cea-608` 42. `text/cea-708` 43. `application/x-id3v4` | All the bits of 2nd byte of data for first track and 11th byte of data for second track and 20th byte of data for third track(if present) modulus 44 |
| `channel-count` | In the range `0 to INT32_MAX` | All the bits of 3rd byte to 6th bytes of data if first track is audio and 12th to 15th bytes of data if second track is audio |
| `sample-rate` | In the range `1 to INT32_MAX` | All the bits of 7th byte to 10th bytes of data if first track is audio and 16th to 19th bytes of data if second track is audio |
| `height` | In the range `0 to INT32_MAX` | All the bits of 3rd byte to 6th bytes of data if first track is video and 12th to 15th bytes of data if second track is video |
diff --git a/media/libstagefright/writer_fuzzers/WriterFuzzerBase.cpp b/media/libstagefright/writer_fuzzers/WriterFuzzerBase.cpp
index 844db39..ee7af70 100644
--- a/media/libstagefright/writer_fuzzers/WriterFuzzerBase.cpp
+++ b/media/libstagefright/writer_fuzzers/WriterFuzzerBase.cpp
@@ -53,7 +53,7 @@
return mNumCsds[trackIndex];
}
-vector<FrameData> WriterFuzzerBase::BufferSource::getFrameList(int32_t trackIndex) {
+vector<FrameData> &WriterFuzzerBase::BufferSource::getFrameList(int32_t trackIndex) {
return mFrameList[trackIndex];
}
@@ -92,9 +92,8 @@
} else {
break;
}
- mFrameList[trackIndex].insert(
- mFrameList[trackIndex].begin(),
- FrameData{static_cast<int32_t>(bufferSize), flags, pts, framePtr});
+ mFrameList[trackIndex].insert(mFrameList[trackIndex].begin(),
+ FrameData{bufferSize, flags, pts, framePtr});
bytesRemaining -= (frameSize + kMarkerSize + kMarkerSuffixSize);
--mReadIndex;
}
@@ -105,31 +104,36 @@
* Scenario where input data does not contain the custom frame markers.
* Hence feed the entire data as single frame.
*/
- mFrameList[0].emplace_back(
- FrameData{static_cast<int32_t>(mSize - readIndexStart), 0, 0, mData + readIndexStart});
+ mFrameList[0].emplace_back(FrameData{mSize - readIndexStart, 0, 0, mData + readIndexStart});
}
}
bool WriterFuzzerBase::BufferSource::getTrackInfo(int32_t trackIndex) {
- if (mSize <= mReadIndex + 2 * sizeof(int) + sizeof(uint8_t)) {
+ if (mSize <= mReadIndex + sizeof(uint8_t)) {
return false;
}
size_t mimeTypeIdx = mData[mReadIndex] % kSupportedMimeTypes;
char *mime = (char *)supportedMimeTypes[mimeTypeIdx].c_str();
mParams[trackIndex].mime = mime;
- ++mReadIndex;
+ mReadIndex += sizeof(uint8_t);
- if (!strncmp(mime, "audio/", 6)) {
- copy(mData + mReadIndex, mData + mReadIndex + sizeof(int),
- reinterpret_cast<char *>(&mParams[trackIndex].channelCount));
- copy(mData + mReadIndex + sizeof(int), mData + mReadIndex + 2 * sizeof(int),
- reinterpret_cast<char *>(&mParams[trackIndex].sampleRate));
+ if (mSize > mReadIndex + 2 * sizeof(int32_t)) {
+ if (!strncmp(mime, "audio/", 6)) {
+ copy(mData + mReadIndex, mData + mReadIndex + sizeof(int32_t),
+ reinterpret_cast<char *>(&mParams[trackIndex].channelCount));
+ copy(mData + mReadIndex + sizeof(int32_t), mData + mReadIndex + 2 * sizeof(int32_t),
+ reinterpret_cast<char *>(&mParams[trackIndex].sampleRate));
+ } else if (!strncmp(mime, "video/", 6)) {
+ copy(mData + mReadIndex, mData + mReadIndex + sizeof(int32_t),
+ reinterpret_cast<char *>(&mParams[trackIndex].height));
+ copy(mData + mReadIndex + sizeof(int32_t), mData + mReadIndex + 2 * sizeof(int32_t),
+ reinterpret_cast<char *>(&mParams[trackIndex].width));
+ }
+ mReadIndex += 2 * sizeof(int32_t);
} else {
- copy(mData + mReadIndex, mData + mReadIndex + sizeof(int),
- reinterpret_cast<char *>(&mParams[trackIndex].height));
- copy(mData + mReadIndex + sizeof(int), mData + mReadIndex + 2 * sizeof(int),
- reinterpret_cast<char *>(&mParams[trackIndex].width));
+ if (strncmp(mime, "text/", 5) && strncmp(mime, "application/", 12)) {
+ return false;
+ }
}
- mReadIndex += 2 * sizeof(int);
return true;
}
@@ -173,7 +177,7 @@
}
format->setInt32("channel-count", params.channelCount);
format->setInt32("sample-rate", params.sampleRate);
- } else {
+ } else if (!strncmp(params.mime, "video/", 6)) {
format->setInt32("width", params.width);
format->setInt32("height", params.height);
}
@@ -193,11 +197,10 @@
mWriter->start(mFileMeta.get());
}
-void WriterFuzzerBase::sendBuffersToWriter(sp<MediaAdapter> ¤tTrack, int32_t trackIndex) {
- int32_t numCsds = mBufferSource->getNumCsds(trackIndex);
+void WriterFuzzerBase::sendBuffersToWriter(sp<MediaAdapter> ¤tTrack, int32_t trackIndex,
+ int32_t startFrameIndex, int32_t endFrameIndex) {
vector<FrameData> bufferInfo = mBufferSource->getFrameList(trackIndex);
- int32_t range = bufferInfo.size();
- for (int idx = numCsds; idx < range; ++idx) {
+ for (int idx = startFrameIndex; idx < endFrameIndex; ++idx) {
sp<ABuffer> buffer = new ABuffer((void *)bufferInfo[idx].buf, bufferInfo[idx].size);
MediaBuffer *mediaBuffer = new MediaBuffer(buffer);
@@ -209,7 +212,7 @@
// Just set the kKeyDecodingTime as the presentation time for now.
sampleMetaData.setInt64(kKeyDecodingTime, bufferInfo[idx].timeUs);
- if (bufferInfo[idx].flags == 1) {
+ if (bufferInfo[idx].flags == SampleFlag::SYNC_FLAG) {
sampleMetaData.setInt32(kKeyIsSyncFrame, true);
}
@@ -218,6 +221,28 @@
}
}
+void WriterFuzzerBase::sendBuffersInterleave(int32_t numTracks, uint8_t numBuffersInterleave) {
+ int32_t currentFrameIndex[numTracks], remainingNumFrames[numTracks], numTrackFramesDone;
+ for (int32_t idx = 0; idx < numTracks; ++idx) {
+ currentFrameIndex[idx] = mBufferSource->getNumCsds(idx);
+ remainingNumFrames[idx] = mBufferSource->getFrameList(idx).size() - currentFrameIndex[idx];
+ }
+ do {
+ numTrackFramesDone = numTracks;
+ for (int32_t idx = 0; idx < numTracks; ++idx) {
+ if (remainingNumFrames[idx] > 0) {
+ int32_t numFramesInterleave =
+ min(remainingNumFrames[idx], static_cast<int32_t>(numBuffersInterleave));
+ sendBuffersToWriter(mCurrentTrack[idx], idx, currentFrameIndex[idx],
+ currentFrameIndex[idx] + numFramesInterleave);
+ currentFrameIndex[idx] += numFramesInterleave;
+ remainingNumFrames[idx] -= numFramesInterleave;
+ --numTrackFramesDone;
+ }
+ }
+ } while (numTrackFramesDone < numTracks);
+}
+
void WriterFuzzerBase::initFileWriterAndProcessData(const uint8_t *data, size_t size) {
if (!createOutputFile()) {
return;
@@ -225,6 +250,14 @@
if (!createWriter()) {
return;
}
+
+ if (size < 1) {
+ return;
+ }
+ uint8_t numBuffersInterleave = (data[0] == 0 ? 1 : data[0]);
+ ++data;
+ --size;
+
mBufferSource = new BufferSource(data, size);
if (!mBufferSource) {
return;
@@ -246,9 +279,7 @@
addWriterSource(idx);
}
start();
- for (int32_t idx = 0; idx < mNumTracks; ++idx) {
- sendBuffersToWriter(mCurrentTrack[idx], idx);
- }
+ sendBuffersInterleave(mNumTracks, numBuffersInterleave);
for (int32_t idx = 0; idx < mNumTracks; ++idx) {
if (mCurrentTrack[idx]) {
mCurrentTrack[idx]->stop();
diff --git a/media/libstagefright/writer_fuzzers/include/WriterFuzzerBase.h b/media/libstagefright/writer_fuzzers/include/WriterFuzzerBase.h
index da06463..4315322 100644
--- a/media/libstagefright/writer_fuzzers/include/WriterFuzzerBase.h
+++ b/media/libstagefright/writer_fuzzers/include/WriterFuzzerBase.h
@@ -34,7 +34,7 @@
using namespace std;
constexpr uint32_t kMimeSize = 128;
-constexpr uint8_t kMaxTrackCount = 2;
+constexpr uint8_t kMaxTrackCount = 3;
constexpr uint32_t kMaxCSDStrlen = 16;
constexpr uint32_t kCodecConfigFlag = 32;
@@ -49,25 +49,65 @@
};
struct FrameData {
- int32_t size;
+ size_t size;
uint8_t flags;
int64_t timeUs;
const uint8_t* buf;
};
-static string supportedMimeTypes[] = {
- "audio/3gpp", "audio/amr-wb", "audio/vorbis", "audio/opus",
- "audio/mp4a-latm", "video/avc", "video/hevc", "video/mp4v-es",
- "video/3gpp", "video/x-vnd.on2.vp8", "video/x-vnd.on2.vp9",
-};
+static string supportedMimeTypes[] = {"audio/3gpp",
+ "audio/amr-wb",
+ "audio/vorbis",
+ "audio/opus",
+ "audio/mp4a-latm",
+ "audio/mpeg",
+ "audio/mpeg-L1",
+ "audio/mpeg-L2",
+ "audio/midi",
+ "audio/qcelp",
+ "audio/g711-alaw",
+ "audio/g711-mlaw",
+ "audio/flac",
+ "audio/aac-adts",
+ "audio/gsm",
+ "audio/ac3",
+ "audio/eac3",
+ "audio/eac3-joc",
+ "audio/ac4",
+ "audio/scrambled",
+ "audio/alac",
+ "audio/x-ms-wma",
+ "audio/x-adpcm-ms",
+ "audio/x-adpcm-dvi-ima",
+ "video/avc",
+ "video/hevc",
+ "video/mp4v-es",
+ "video/3gpp",
+ "video/x-vnd.on2.vp8",
+ "video/x-vnd.on2.vp9",
+ "video/av01",
+ "video/mpeg2",
+ "video/dolby-vision",
+ "video/scrambled",
+ "video/divx",
+ "video/divx3",
+ "video/xvid",
+ "video/x-motion-jpeg",
+ "text/3gpp-tt",
+ "application/x-subrip",
+ "text/vtt",
+ "text/cea-608",
+ "text/cea-708",
+ "application/x-id3v4"};
-enum {
+enum SampleFlag {
DEFAULT_FLAG = 0,
SYNC_FLAG = 1,
ENCRYPTED_FLAG = 2,
};
-static uint8_t flagTypes[] = {DEFAULT_FLAG, SYNC_FLAG, ENCRYPTED_FLAG};
+static uint8_t flagTypes[] = {SampleFlag::DEFAULT_FLAG, SampleFlag::SYNC_FLAG,
+ SampleFlag::ENCRYPTED_FLAG};
class WriterFuzzerBase {
public:
@@ -105,7 +145,10 @@
void start();
- void sendBuffersToWriter(sp<MediaAdapter>& currentTrack, int32_t trackIndex);
+ void sendBuffersToWriter(sp<MediaAdapter>& currentTrack, int32_t trackIndex,
+ int32_t startFrameIndex, int32_t endFrameIndex);
+
+ void sendBuffersInterleave(int32_t numTracks, uint8_t numBuffersInterleave);
void initFileWriterAndProcessData(const uint8_t* data, size_t size);
@@ -126,7 +169,7 @@
void getFrameInfo();
ConfigFormat getConfigFormat(int32_t trackIndex);
int32_t getNumCsds(int32_t trackIndex);
- vector<FrameData> getFrameList(int32_t trackIndex);
+ vector<FrameData>& getFrameList(int32_t trackIndex);
private:
bool isMarker() { return (memcmp(&mData[mReadIndex], kMarker, kMarkerSize) == 0); }
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index b3b5d54..e9ea386 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -114,6 +114,7 @@
static_libs: [
"libgrallocusage",
+ "libnativehelper_lazy",
],
header_libs: [
@@ -142,7 +143,6 @@
"libgui",
"libui",
"libmediandk_utils",
- "libnativehelper",
],
export_header_lib_headers: ["jni_headers"],
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index e1d806d..4854d3a 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -65,11 +65,11 @@
constexpr float IN_CALL_EARPIECE_HEADROOM_DB = 3.f;
// Compressed formats for MSD module, ordered from most preferred to least preferred.
-static const std::vector<audio_format_t> compressedFormatsOrder = {{
- AUDIO_FORMAT_MAT_2_1, AUDIO_FORMAT_MAT_2_0, AUDIO_FORMAT_E_AC3,
+static const std::vector<audio_format_t> msdCompressedFormatsOrder = {{
+ AUDIO_FORMAT_IEC60958, AUDIO_FORMAT_MAT_2_1, AUDIO_FORMAT_MAT_2_0, AUDIO_FORMAT_E_AC3,
AUDIO_FORMAT_AC3, AUDIO_FORMAT_PCM_16_BIT }};
// Channel masks for MSD module, 3D > 2D > 1D ordering (most preferred to least preferred).
-static const std::vector<audio_channel_mask_t> surroundChannelMasksOrder = {{
+static const std::vector<audio_channel_mask_t> msdSurroundChannelMasksOrder = {{
AUDIO_CHANNEL_OUT_3POINT1POINT2, AUDIO_CHANNEL_OUT_3POINT0POINT2,
AUDIO_CHANNEL_OUT_2POINT1POINT2, AUDIO_CHANNEL_OUT_2POINT0POINT2,
AUDIO_CHANNEL_OUT_5POINT1, AUDIO_CHANNEL_OUT_STEREO }};
@@ -1037,7 +1037,7 @@
*output = AUDIO_IO_HANDLE_NONE;
if (!msdDevices.isEmpty()) {
*output = getOutputForDevices(msdDevices, session, *stream, config, flags);
- if (*output != AUDIO_IO_HANDLE_NONE && setMsdPatches(&outputDevices) == NO_ERROR) {
+ if (*output != AUDIO_IO_HANDLE_NONE && setMsdOutputPatches(&outputDevices) == NO_ERROR) {
ALOGV("%s() Using MSD devices %s instead of devices %s",
__func__, msdDevices.toString().c_str(), outputDevices.toString().c_str());
} else {
@@ -1203,7 +1203,7 @@
// An MSD patch may be using the only output stream that can service this request. Release
// all MSD patches to prioritize this request over any active output on MSD.
- releaseMsdPatches(devices);
+ releaseMsdOutputPatches(devices);
status_t status = outputDesc->open(config, devices, stream, flags, output);
@@ -1326,7 +1326,7 @@
mAvailableOutputDevices);
}
-const AudioPatchCollection AudioPolicyManager::getMsdPatches() const {
+const AudioPatchCollection AudioPolicyManager::getMsdOutputPatches() const {
AudioPatchCollection msdPatches;
sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
if (msdModule != 0) {
@@ -1344,50 +1344,47 @@
return msdPatches;
}
-status_t AudioPolicyManager::getBestMsdAudioProfileFor(const sp<DeviceDescriptor> &outputDevice,
- bool hwAvSync, audio_port_config *sourceConfig, audio_port_config *sinkConfig) const
-{
- sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
- if (msdModule == nullptr) {
- ALOGE("%s() unable to get MSD module", __func__);
- return NO_INIT;
- }
- sp<HwModule> deviceModule = mHwModules.getModuleForDevice(outputDevice, AUDIO_FORMAT_DEFAULT);
- if (deviceModule == nullptr) {
- ALOGE("%s() unable to get module for %s", __func__, outputDevice->toString().c_str());
- return NO_INIT;
- }
- const InputProfileCollection &inputProfiles = msdModule->getInputProfiles();
+status_t AudioPolicyManager::getMsdProfiles(bool hwAvSync,
+ const InputProfileCollection &inputProfiles,
+ const OutputProfileCollection &outputProfiles,
+ const sp<DeviceDescriptor> &sourceDevice,
+ const sp<DeviceDescriptor> &sinkDevice,
+ AudioProfileVector& sourceProfiles,
+ AudioProfileVector& sinkProfiles) const {
if (inputProfiles.isEmpty()) {
- ALOGE("%s() no input profiles for MSD module", __func__);
+ ALOGE("%s() no input profiles for source module", __func__);
return NO_INIT;
}
- const OutputProfileCollection &outputProfiles = deviceModule->getOutputProfiles();
if (outputProfiles.isEmpty()) {
- ALOGE("%s() no output profiles for device %s", __func__, outputDevice->toString().c_str());
+ ALOGE("%s() no output profiles for sink module", __func__);
return NO_INIT;
}
- AudioProfileVector msdProfiles;
- // Each IOProfile represents a MixPort from audio_policy_configuration.xml
for (const auto &inProfile : inputProfiles) {
- if (hwAvSync == ((inProfile->getFlags() & AUDIO_INPUT_FLAG_HW_AV_SYNC) != 0)) {
- appendAudioProfiles(msdProfiles, inProfile->getAudioProfiles());
+ if (hwAvSync == ((inProfile->getFlags() & AUDIO_INPUT_FLAG_HW_AV_SYNC) != 0) &&
+ inProfile->supportsDevice(sourceDevice)) {
+ appendAudioProfiles(sourceProfiles, inProfile->getAudioProfiles());
}
}
- AudioProfileVector deviceProfiles;
for (const auto &outProfile : outputProfiles) {
if (hwAvSync == ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) &&
- outProfile->supportsDevice(outputDevice)) {
- appendAudioProfiles(deviceProfiles, outProfile->getAudioProfiles());
+ outProfile->supportsDevice(sinkDevice)) {
+ appendAudioProfiles(sinkProfiles, outProfile->getAudioProfiles());
}
}
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManager::getBestMsdConfig(bool hwAvSync,
+ const AudioProfileVector &sourceProfiles, const AudioProfileVector &sinkProfiles,
+ audio_port_config *sourceConfig, audio_port_config *sinkConfig) const
+{
struct audio_config_base bestSinkConfig;
- status_t result = findBestMatchingOutputConfig(msdProfiles, deviceProfiles,
- compressedFormatsOrder, surroundChannelMasksOrder, true /*preferHigherSamplingRates*/,
- bestSinkConfig);
+ status_t result = findBestMatchingOutputConfig(sourceProfiles, sinkProfiles,
+ msdCompressedFormatsOrder, msdSurroundChannelMasksOrder,
+ true /*preferHigherSamplingRates*/, bestSinkConfig);
if (result != NO_ERROR) {
- ALOGD("%s() no matching profiles found for device: %s, hwAvSync: %d",
- __func__, outputDevice->toString().c_str(), hwAvSync);
+ ALOGD("%s() no matching config found for sink, hwAvSync: %d",
+ __func__, hwAvSync);
return result;
}
sinkConfig->sample_rate = bestSinkConfig.sample_rate;
@@ -1398,7 +1395,7 @@
sinkConfig->flags.output | AUDIO_OUTPUT_FLAG_DIRECT);
if (audio_is_iec61937_compatible(sinkConfig->format)) {
// For formats compatible with IEC61937 encapsulation, assume that
- // the record thread input from MSD is IEC61937 framed (for proportional buffer sizing).
+ // the input is IEC61937 framed (for proportional buffer sizing).
// Add the AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO flag so downstream HAL can distinguish between
// raw and IEC61937 framed streams.
sinkConfig->flags.output = static_cast<audio_output_flags_t>(
@@ -1424,28 +1421,50 @@
return NO_ERROR;
}
-PatchBuilder AudioPolicyManager::buildMsdPatch(const sp<DeviceDescriptor> &outputDevice) const
+PatchBuilder AudioPolicyManager::buildMsdPatch(bool msdIsSource,
+ const sp<DeviceDescriptor> &device) const
{
PatchBuilder patchBuilder;
- patchBuilder.addSource(getMsdAudioInDevice()).addSink(outputDevice);
+ sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
+ ALOG_ASSERT(msdModule != nullptr, "MSD module not available");
+ sp<HwModule> deviceModule = mHwModules.getModuleForDevice(device, AUDIO_FORMAT_DEFAULT);
+ if (deviceModule == nullptr) {
+ ALOGE("%s() unable to get module for %s", __func__, device->toString().c_str());
+ return patchBuilder;
+ }
+ const InputProfileCollection inputProfiles = msdIsSource ?
+ msdModule->getInputProfiles() : deviceModule->getInputProfiles();
+ const OutputProfileCollection outputProfiles = msdIsSource ?
+ deviceModule->getOutputProfiles() : msdModule->getOutputProfiles();
+
+ const sp<DeviceDescriptor> sourceDevice = msdIsSource ? getMsdAudioInDevice() : device;
+ const sp<DeviceDescriptor> sinkDevice = msdIsSource ?
+ device : getMsdAudioOutDevices().itemAt(0);
+ patchBuilder.addSource(sourceDevice).addSink(sinkDevice);
+
audio_port_config sourceConfig = patchBuilder.patch()->sources[0];
audio_port_config sinkConfig = patchBuilder.patch()->sinks[0];
+ AudioProfileVector sourceProfiles;
+ AudioProfileVector sinkProfiles;
// TODO: Figure out whether MSD module has HW_AV_SYNC flag set in the AP config file.
// For now, we just forcefully try with HwAvSync first.
- status_t res = getBestMsdAudioProfileFor(outputDevice, true /*hwAvSync*/,
- &sourceConfig, &sinkConfig) == NO_ERROR ? NO_ERROR :
- getBestMsdAudioProfileFor(
- outputDevice, false /*hwAvSync*/, &sourceConfig, &sinkConfig);
- if (res == NO_ERROR) {
- // Found a matching profile for encoded audio. Re-create PatchBuilder with this config.
- return (PatchBuilder()).addSource(sourceConfig).addSink(sinkConfig);
+ for (auto hwAvSync : { true, false }) {
+ if (getMsdProfiles(hwAvSync, inputProfiles, outputProfiles, sourceDevice, sinkDevice,
+ sourceProfiles, sinkProfiles) != NO_ERROR) {
+ continue;
+ }
+ if (getBestMsdConfig(hwAvSync, sourceProfiles, sinkProfiles, &sourceConfig,
+ &sinkConfig) == NO_ERROR) {
+ // Found a matching config. Re-create PatchBuilder with this config.
+ return (PatchBuilder()).addSource(sourceConfig).addSink(sinkConfig);
+ }
}
- ALOGV("%s() no matching profile found. Fall through to default PCM patch"
+ ALOGV("%s() no matching config found. Fall through to default PCM patch"
" supporting PCM format conversion.", __func__);
return patchBuilder;
}
-status_t AudioPolicyManager::setMsdPatches(const DeviceVector *outputDevices) {
+status_t AudioPolicyManager::setMsdOutputPatches(const DeviceVector *outputDevices) {
DeviceVector devices;
if (outputDevices != nullptr && outputDevices->size() > 0) {
devices.add(*outputDevices);
@@ -1460,11 +1479,11 @@
std::vector<PatchBuilder> patchesToCreate;
for (auto i = 0u; i < devices.size(); ++i) {
ALOGV("%s() for device %s", __func__, devices[i]->toString().c_str());
- patchesToCreate.push_back(buildMsdPatch(devices[i]));
+ patchesToCreate.push_back(buildMsdPatch(true /*msdIsSource*/, devices[i]));
}
// Retain only the MSD patches associated with outputDevices request.
// Tear down the others, and create new ones as needed.
- AudioPatchCollection patchesToRemove = getMsdPatches();
+ AudioPatchCollection patchesToRemove = getMsdOutputPatches();
for (auto it = patchesToCreate.begin(); it != patchesToCreate.end(); ) {
auto retainedPatch = false;
for (auto i = 0u; i < patchesToRemove.size(); ++i) {
@@ -1509,8 +1528,8 @@
return status;
}
-void AudioPolicyManager::releaseMsdPatches(const DeviceVector& devices) {
- AudioPatchCollection msdPatches = getMsdPatches();
+void AudioPolicyManager::releaseMsdOutputPatches(const DeviceVector& devices) {
+ AudioPatchCollection msdPatches = getMsdOutputPatches();
for (size_t i = 0; i < msdPatches.size(); i++) {
const auto& patch = msdPatches[i];
for (size_t j = 0; j < patch->mPatch.num_sinks; ++j) {
@@ -3829,6 +3848,15 @@
// be incomplete.
PatchBuilder patchBuilder;
audio_port_config sourcePortConfig = {};
+
+ // if first sink is to MSD, establish single MSD patch
+ if (getMsdAudioOutDevices().contains(
+ mAvailableOutputDevices.getDeviceFromId(patch->sinks[0].id))) {
+ ALOGV("%s patching to MSD", __FUNCTION__);
+ patchBuilder = buildMsdPatch(false /*msdIsSource*/, srcDevice);
+ goto installPatch;
+ }
+
srcDevice->toAudioPortConfig(&sourcePortConfig, &patch->sources[0]);
patchBuilder.addSource(sourcePortConfig);
@@ -3924,6 +3952,7 @@
}
// TODO: check from routing capabilities in config file and other conflicting patches
+installPatch:
status_t status = installPatch(
__func__, index, handle, patchBuilder.patch(), delayMs, uid, &patchDesc);
if (status != NO_ERROR) {
@@ -5351,7 +5380,7 @@
// arguments to mEngine->getOutputDevicesForAttributes() when resolving which output
// devices to patch to. This may be complicated by the fact that devices may become
// unavailable.
- setMsdPatches();
+ setMsdOutputPatches();
}
}
}
@@ -5424,7 +5453,7 @@
// unnecessary rerouting by caching and reusing the arguments to
// mEngine->getOutputDevicesForAttributes() when resolving which output devices to patch to.
// This may be complicated by the fact that devices may become unavailable.
- setMsdPatches();
+ setMsdOutputPatches();
}
}
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index c1c483c..ed5be5e 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -847,14 +847,22 @@
// Support for Multi-Stream Decoder (MSD) module
sp<DeviceDescriptor> getMsdAudioInDevice() const;
DeviceVector getMsdAudioOutDevices() const;
- const AudioPatchCollection getMsdPatches() const;
- status_t getBestMsdAudioProfileFor(const sp<DeviceDescriptor> &outputDevice,
- bool hwAvSync,
- audio_port_config *sourceConfig,
- audio_port_config *sinkConfig) const;
- PatchBuilder buildMsdPatch(const sp<DeviceDescriptor> &outputDevice) const;
- status_t setMsdPatches(const DeviceVector *outputDevices = nullptr);
- void releaseMsdPatches(const DeviceVector& devices);
+ const AudioPatchCollection getMsdOutputPatches() const;
+ status_t getMsdProfiles(bool hwAvSync,
+ const InputProfileCollection &inputProfiles,
+ const OutputProfileCollection &outputProfiles,
+ const sp<DeviceDescriptor> &sourceDevice,
+ const sp<DeviceDescriptor> &sinkDevice,
+ AudioProfileVector &sourceProfiles,
+ AudioProfileVector &sinkProfiles) const;
+ status_t getBestMsdConfig(bool hwAvSync,
+ const AudioProfileVector &sourceProfiles,
+ const AudioProfileVector &sinkProfiles,
+ audio_port_config *sourceConfig,
+ audio_port_config *sinkConfig) const;
+ PatchBuilder buildMsdPatch(bool msdIsSource, const sp<DeviceDescriptor> &device) const;
+ status_t setMsdOutputPatches(const DeviceVector *outputDevices = nullptr);
+ void releaseMsdOutputPatches(const DeviceVector& devices);
private:
void onNewAudioModulesAvailableInt(DeviceVector *newDevices);
diff --git a/services/audiopolicy/tests/AudioPolicyTestManager.h b/services/audiopolicy/tests/AudioPolicyTestManager.h
index c096427..6150206 100644
--- a/services/audiopolicy/tests/AudioPolicyTestManager.h
+++ b/services/audiopolicy/tests/AudioPolicyTestManager.h
@@ -29,8 +29,9 @@
using AudioPolicyManager::getOutputs;
using AudioPolicyManager::getAvailableOutputDevices;
using AudioPolicyManager::getAvailableInputDevices;
- using AudioPolicyManager::releaseMsdPatches;
- using AudioPolicyManager::setMsdPatches;
+ using AudioPolicyManager::releaseMsdOutputPatches;
+ using AudioPolicyManager::setMsdOutputPatches;
+ using AudioPolicyManager::getAudioPatches;
uint32_t getAudioPortGeneration() const { return mAudioPortGeneration; }
};
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index f391606..5b6b3e7 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -340,6 +340,8 @@
const size_t mExpectedAudioPatchCount;
sp<DeviceDescriptor> mSpdifDevice;
+
+ sp<DeviceDescriptor> mHdmiInputDevice;
};
AudioPolicyManagerTestMsd::AudioPolicyManagerTestMsd()
@@ -366,8 +368,11 @@
AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 48000);
sp<AudioProfile> ac3OutputProfile = new AudioProfile(
AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000);
+ sp<AudioProfile> iec958OutputProfile = new AudioProfile(
+ AUDIO_FORMAT_IEC60958, AUDIO_CHANNEL_OUT_STEREO, 48000);
mMsdOutputDevice->addAudioProfile(pcmOutputProfile);
mMsdOutputDevice->addAudioProfile(ac3OutputProfile);
+ mMsdOutputDevice->addAudioProfile(iec958OutputProfile);
mMsdInputDevice = new DeviceDescriptor(AUDIO_DEVICE_IN_BUS);
// Match output profile from AudioPolicyConfig::setDefault.
sp<AudioProfile> pcmInputProfile = new AudioProfile(
@@ -405,6 +410,11 @@
AUDIO_OUTPUT_FLAG_NON_BLOCKING);
msdCompressedOutputProfile->addSupportedDevice(mMsdOutputDevice);
msdModule->addOutputProfile(msdCompressedOutputProfile);
+ sp<OutputProfile> msdIec958OutputProfile = new OutputProfile("msd iec958 input");
+ msdIec958OutputProfile->addAudioProfile(iec958OutputProfile);
+ msdIec958OutputProfile->setFlags(AUDIO_OUTPUT_FLAG_DIRECT);
+ msdIec958OutputProfile->addSupportedDevice(mMsdOutputDevice);
+ msdModule->addOutputProfile(msdIec958OutputProfile);
sp<InputProfile> msdInputProfile = new InputProfile("msd output");
msdInputProfile->addAudioProfile(pcmInputProfile);
@@ -428,6 +438,19 @@
mSpdifDevice->addAudioProfile(dtsOutputProfile);
primaryEncodedOutputProfile->addSupportedDevice(mSpdifDevice);
}
+
+ // Add HDMI input device with IEC60958 profile for HDMI in -> MSD patching.
+ mHdmiInputDevice = new DeviceDescriptor(AUDIO_DEVICE_IN_HDMI);
+ sp<AudioProfile> iec958InputProfile = new AudioProfile(
+ AUDIO_FORMAT_IEC60958, AUDIO_CHANNEL_IN_STEREO, 48000);
+ mHdmiInputDevice->addAudioProfile(iec958InputProfile);
+ config.addDevice(mHdmiInputDevice);
+ sp<InputProfile> hdmiInputProfile = new InputProfile("hdmi input");
+ hdmiInputProfile->addAudioProfile(iec958InputProfile);
+ hdmiInputProfile->setFlags(AUDIO_INPUT_FLAG_DIRECT);
+ hdmiInputProfile->addSupportedDevice(mHdmiInputDevice);
+ config.getHwModules().getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY)->
+ addInputProfile(hdmiInputProfile);
}
void AudioPolicyManagerTestMsd::TearDown() {
@@ -435,6 +458,7 @@
mMsdInputDevice.clear();
mDefaultOutputDevice.clear();
mSpdifDevice.clear();
+ mHdmiInputDevice.clear();
AudioPolicyManagerTest::TearDown();
}
@@ -455,21 +479,21 @@
ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
}
-TEST_P(AudioPolicyManagerTestMsd, PatchCreationSetReleaseMsdPatches) {
+TEST_P(AudioPolicyManagerTestMsd, PatchCreationSetReleaseMsdOutputPatches) {
const PatchCountCheck patchCount = snapshotPatchCount();
DeviceVector devices = mManager->getAvailableOutputDevices();
// Remove MSD output device to avoid patching to itself
devices.remove(mMsdOutputDevice);
ASSERT_EQ(mExpectedAudioPatchCount, devices.size());
- mManager->setMsdPatches(&devices);
+ mManager->setMsdOutputPatches(&devices);
ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
// Dual patch: exercise creating one new audio patch and reusing another existing audio patch.
DeviceVector singleDevice(devices[0]);
- mManager->releaseMsdPatches(singleDevice);
+ mManager->releaseMsdOutputPatches(singleDevice);
ASSERT_EQ(mExpectedAudioPatchCount - 1, patchCount.deltaFromSnapshot());
- mManager->setMsdPatches(&devices);
+ mManager->setMsdOutputPatches(&devices);
ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
- mManager->releaseMsdPatches(devices);
+ mManager->releaseMsdOutputPatches(devices);
ASSERT_EQ(0, patchCount.deltaFromSnapshot());
}
@@ -550,6 +574,34 @@
}
}
+TEST_P(AudioPolicyManagerTestMsd, PatchCreationFromHdmiInToMsd) {
+ audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
+ uid_t uid = 42;
+ const PatchCountCheck patchCount = snapshotPatchCount();
+ ASSERT_FALSE(mManager->getAvailableInputDevices().isEmpty());
+ PatchBuilder patchBuilder;
+ patchBuilder.
+ addSource(mManager->getAvailableInputDevices().
+ getDevice(AUDIO_DEVICE_IN_HDMI, String8(""), AUDIO_FORMAT_DEFAULT)).
+ addSink(mManager->getAvailableOutputDevices().
+ getDevice(AUDIO_DEVICE_OUT_BUS, String8(""), AUDIO_FORMAT_DEFAULT));
+ ASSERT_EQ(NO_ERROR, mManager->createAudioPatch(patchBuilder.patch(), &handle, uid));
+ ASSERT_NE(AUDIO_PATCH_HANDLE_NONE, handle);
+ AudioPatchCollection patches = mManager->getAudioPatches();
+ sp<AudioPatch> patch = patches.valueFor(handle);
+ ASSERT_EQ(1, patch->mPatch.num_sources);
+ ASSERT_EQ(1, patch->mPatch.num_sinks);
+ ASSERT_EQ(AUDIO_PORT_ROLE_SOURCE, patch->mPatch.sources[0].role);
+ ASSERT_EQ(AUDIO_PORT_ROLE_SINK, patch->mPatch.sinks[0].role);
+ ASSERT_EQ(AUDIO_FORMAT_IEC60958, patch->mPatch.sources[0].format);
+ ASSERT_EQ(AUDIO_FORMAT_IEC60958, patch->mPatch.sinks[0].format);
+ ASSERT_EQ(AUDIO_CHANNEL_IN_STEREO, patch->mPatch.sources[0].channel_mask);
+ ASSERT_EQ(AUDIO_CHANNEL_OUT_STEREO, patch->mPatch.sinks[0].channel_mask);
+ ASSERT_EQ(48000, patch->mPatch.sources[0].sample_rate);
+ ASSERT_EQ(48000, patch->mPatch.sinks[0].sample_rate);
+ ASSERT_EQ(1, patchCount.deltaFromSnapshot());
+}
+
class AudioPolicyManagerTestWithConfigurationFile : public AudioPolicyManagerTest {
protected:
void SetUpManagerConfig() override;
diff --git a/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp b/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
new file mode 100644
index 0000000..df4ef95
--- /dev/null
+++ b/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
@@ -0,0 +1,55 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *****************************************************************************
+ * Originally developed and contributed by Ittiam Systems Pvt. Ltd, Bangalore
+ */
+
+cc_fuzz {
+ name: "camera_service_fuzzer",
+ srcs: [
+ "camera_service_fuzzer.cpp",
+ ],
+ header_libs: [
+ "libmedia_headers",
+ ],
+ shared_libs: [
+ "libbinder",
+ "libbase",
+ "libutils",
+ "libcutils",
+ "libcameraservice",
+ "libcamera_client",
+ "libui",
+ "libgui",
+ "android.hardware.camera.common@1.0",
+ "android.hardware.camera.provider@2.4",
+ "android.hardware.camera.provider@2.5",
+ "android.hardware.camera.provider@2.6",
+ "android.hardware.camera.device@1.0",
+ "android.hardware.camera.device@3.2",
+ "android.hardware.camera.device@3.3",
+ "android.hardware.camera.device@3.4",
+ "android.hardware.camera.device@3.5",
+ "android.hardware.camera.device@3.6",
+ ],
+ fuzz_config: {
+ cc: [
+ "android-media-fuzzing-reports@google.com",
+ ],
+ componentid: 155276,
+ },
+}
diff --git a/services/camera/libcameraservice/libcameraservice_fuzzer/README.md b/services/camera/libcameraservice/libcameraservice_fuzzer/README.md
new file mode 100644
index 0000000..c703845
--- /dev/null
+++ b/services/camera/libcameraservice/libcameraservice_fuzzer/README.md
@@ -0,0 +1,59 @@
+# Fuzzer for libcameraservice
+
+## Plugin Design Considerations
+The fuzzer plugin is designed based on an understanding of the
+library and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer.
+
+libcameraservice supports the following parameters:
+1. Camera Type (parameter name: `cameraType`)
+2. Camera API Version (parameter name: `cameraAPIVersion`)
+3. Event ID (parameter name: `eventId`)
+4. Camera Sound Kind (parameter name: `soundKind`)
+5. Shell Command (parameter name: `shellCommand`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `cameraType` | 0. `CAMERA_TYPE_BACKWARD_COMPATIBLE` 1. `CAMERA_TYPE_ALL` | Value obtained from FuzzedDataProvider |
+| `cameraAPIVersion` | 0. `API_VERSION_1` 1. `API_VERSION_2` | Value obtained from FuzzedDataProvider |
+| `eventId` | 0. `EVENT_USER_SWITCHED` 1. `EVENT_NONE` | Value obtained from FuzzedDataProvider |
+| `soundKind` | 0. `SOUND_SHUTTER` 1. `SOUND_RECORDING_START` 2. `SOUND_RECORDING_STOP`| Value obtained from FuzzedDataProvider |
+| `shellCommand` | 0. `set-uid-state` 1. `reset-uid-state` 2. `get-uid-state` 3. `set-rotate-and-crop` 4. `get-rotate-and-crop` 5. `help`| Value obtained from FuzzedDataProvider |
+
+This also ensures that the plugin is always deterministic for any given input.
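+
+As an illustrative sketch (not code from this change; `pickCameraType` and
+`validTypes` are hypothetical names), a parameter such as `cameraType` can be
+drawn either from the valid set or as an arbitrary value, so that both the
+normal and the error paths of the API are exercised:
+```
+#include <cstdint>
+
+#include "fuzzer/FuzzedDataProvider.h"
+
+// Pick either an arbitrary (possibly invalid) value or one of the two valid
+// constants, driven entirely by the fuzzer-provided input bytes.
+static int32_t pickCameraType(FuzzedDataProvider &fdp, const int32_t validTypes[2]) {
+    if (fdp.ConsumeBool()) {
+        return fdp.ConsumeIntegral<int32_t>();  // arbitrary, possibly invalid
+    }
+    return validTypes[fdp.ConsumeBool()];       // index 0 or 1 of the valid set
+}
+```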
+
+##### Maximize utilization of input data
+The plugin tolerates any kind of input (empty, huge,
+malformed, etc.) and doesn't `exit()` on any input, thereby increasing the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes the steps to build the camera_service_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+ $ mm -j$(nproc) camera_service_fuzzer
+```
+
+#### Steps to run
+Create a directory CORPUS_DIR
+```
+ $ adb shell mkdir CORPUS_DIR
+```
+
+To run on device
+```
+ $ adb sync data
+ $ adb shell /data/fuzz/arm64/camera_service_fuzzer/camera_service_fuzzer CORPUS_DIR
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp b/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
new file mode 100644
index 0000000..54550a5
--- /dev/null
+++ b/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
@@ -0,0 +1,433 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *****************************************************************************
+ * Originally developed and contributed by Ittiam Systems Pvt. Ltd, Bangalore
+ */
+
+#include <CameraService.h>
+#include <android/hardware/ICameraServiceListener.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include <private/android_filesystem_config.h>
+#include "fuzzer/FuzzedDataProvider.h"
+
+using namespace android;
+using namespace hardware;
+using namespace std;
+
+const int32_t kPreviewThreshold = 8;
+const nsecs_t kPreviewTimeout = 5000000000; // 5 [s.]
+const nsecs_t kEventTimeout = 10000000000; // 10 [s.]
+const size_t kMaxNumLines = USHRT_MAX;
+const size_t kMinArgs = 1;
+const size_t kMaxArgs = 5;
+const int32_t kCamType[] = {hardware::ICameraService::CAMERA_TYPE_BACKWARD_COMPATIBLE,
+ hardware::ICameraService::CAMERA_TYPE_ALL};
+const int kCameraApiVersion[] = {android::CameraService::API_VERSION_1,
+ android::CameraService::API_VERSION_2};
+const int kLayerMetadata[] = {
+ 0x00100000 /*GRALLOC_USAGE_RENDERSCRIPT*/, 0x00000003 /*GRALLOC_USAGE_SW_READ_OFTEN*/,
+ 0x00000100 /*GRALLOC_USAGE_HW_TEXTURE*/, 0x00000800 /*GRALLOC_USAGE_HW_COMPOSER*/,
+ 0x00000200 /*GRALLOC_USAGE_HW_RENDER*/, 0x00010000 /*GRALLOC_USAGE_HW_VIDEO_ENCODER*/};
+const int kCameraMsg[] = {0x001 /*CAMERA_MSG_ERROR*/,
+ 0x002 /*CAMERA_MSG_SHUTTER*/,
+ 0x004 /*CAMERA_MSG_FOCUS*/,
+ 0x008 /*CAMERA_MSG_ZOOM*/,
+ 0x010 /*CAMERA_MSG_PREVIEW_FRAME*/,
+ 0x020 /*CAMERA_MSG_VIDEO_FRAME */,
+ 0x040 /*CAMERA_MSG_POSTVIEW_FRAME*/,
+ 0x080 /*CAMERA_MSG_RAW_IMAGE */,
+ 0x100 /*CAMERA_MSG_COMPRESSED_IMAGE*/,
+ 0x200 /*CAMERA_MSG_RAW_IMAGE_NOTIFY*/,
+ 0x400 /*CAMERA_MSG_PREVIEW_METADATA*/,
+ 0x800 /*CAMERA_MSG_FOCUS_MOVE*/};
+const int32_t kEventId[] = {ICameraService::EVENT_USER_SWITCHED, ICameraService::EVENT_NONE};
+const android::CameraService::sound_kind kSoundKind[] = {
+ android::CameraService::SOUND_SHUTTER, android::CameraService::SOUND_RECORDING_START,
+ android::CameraService::SOUND_RECORDING_STOP};
+const String16 kShellCmd[] = {String16("set-uid-state"), String16("reset-uid-state"),
+ String16("get-uid-state"), String16("set-rotate-and-crop"),
+ String16("get-rotate-and-crop"), String16("help")};
+const size_t kNumLayerMetaData = size(kLayerMetadata);
+const size_t kNumCameraMsg = size(kCameraMsg);
+const size_t kNumSoundKind = size(kSoundKind);
+const size_t kNumShellCmd = size(kShellCmd);
+
+class CameraFuzzer : public ::android::hardware::BnCameraClient {
+ public:
+ CameraFuzzer() = default;
+ ~CameraFuzzer() { deInit(); }
+ bool init();
+ void process(const uint8_t *data, size_t size);
+ void deInit();
+
+ private:
+ FuzzedDataProvider *mFuzzedDataProvider = nullptr;
+ sp<CameraService> mCameraService = nullptr;
+ sp<SurfaceComposerClient> mComposerClient = nullptr;
+ int32_t mNumCameras = 0;
+ size_t mPreviewBufferCount = 0;
+ bool mAutoFocusMessage = false;
+ bool mSnapshotNotification = false;
+ mutable Mutex mPreviewLock;
+ mutable Condition mPreviewCondition;
+ mutable Mutex mAutoFocusLock;
+ mutable Condition mAutoFocusCondition;
+ mutable Mutex mSnapshotLock;
+ mutable Condition mSnapshotCondition;
+
+ void getNumCameras();
+ void getCameraInformation(int32_t cameraId);
+ void invokeCameraAPIs();
+ void invokeCameraSound();
+ void invokeDump();
+ void invokeShellCommand();
+ void invokeNotifyCalls();
+
+ // CameraClient interface
+ void notifyCallback(int32_t msgType, int32_t, int32_t) override;
+ void dataCallback(int32_t msgType, const sp<IMemory> &, camera_frame_metadata_t *) override;
+ void dataCallbackTimestamp(nsecs_t, int32_t, const sp<IMemory> &) override{};
+ void recordingFrameHandleCallbackTimestamp(nsecs_t, native_handle_t *) override{};
+ void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<nsecs_t> &, const std::vector<native_handle_t *> &) override{};
+ status_t waitForPreviewStart();
+ status_t waitForEvent(Mutex &mutex, Condition &condition, bool &flag);
+};
+
+void CameraFuzzer::notifyCallback(int32_t msgType, int32_t, int32_t) {
+ if (CAMERA_MSG_FOCUS == msgType) {
+ Mutex::Autolock l(mAutoFocusLock);
+ mAutoFocusMessage = true;
+ mAutoFocusCondition.broadcast();
+ }
+};
+
+void CameraFuzzer::dataCallback(int32_t msgType, const sp<IMemory> & /*data*/,
+ camera_frame_metadata_t *) {
+ switch (msgType) {
+ case CAMERA_MSG_PREVIEW_FRAME: {
+ Mutex::Autolock l(mPreviewLock);
+ ++mPreviewBufferCount;
+ mPreviewCondition.broadcast();
+ break;
+ }
+ case CAMERA_MSG_COMPRESSED_IMAGE: {
+ Mutex::Autolock l(mSnapshotLock);
+ mSnapshotNotification = true;
+ mSnapshotCondition.broadcast();
+ break;
+ }
+ default:
+ break;
+ }
+};
+
+status_t CameraFuzzer::waitForPreviewStart() {
+ status_t rc = NO_ERROR;
+ Mutex::Autolock l(mPreviewLock);
+ mPreviewBufferCount = 0;
+
+ while (mPreviewBufferCount < kPreviewThreshold) {
+ rc = mPreviewCondition.waitRelative(mPreviewLock, kPreviewTimeout);
+ if (NO_ERROR != rc) {
+ break;
+ }
+ }
+
+ return rc;
+}
+
+status_t CameraFuzzer::waitForEvent(Mutex &mutex, Condition &condition, bool &flag) {
+ status_t rc = NO_ERROR;
+ Mutex::Autolock l(mutex);
+ flag = false;
+
+ while (!flag) {
+ rc = condition.waitRelative(mutex, kEventTimeout);
+ if (NO_ERROR != rc) {
+ break;
+ }
+ }
+
+ return rc;
+}
+
+bool CameraFuzzer::init() {
+ setuid(AID_MEDIA);
+ mCameraService = new CameraService();
+ if (mCameraService) {
+ return true;
+ }
+ return false;
+}
+
+void CameraFuzzer::deInit() {
+ if (mCameraService) {
+ mCameraService = nullptr;
+ }
+ if (mComposerClient) {
+ mComposerClient->dispose();
+ }
+}
+
+void CameraFuzzer::getNumCameras() {
+ bool shouldPassInvalidCamType = mFuzzedDataProvider->ConsumeBool();
+ int32_t camType;
+ if (shouldPassInvalidCamType) {
+ camType = mFuzzedDataProvider->ConsumeIntegral<int32_t>();
+ } else {
+ camType = kCamType[mFuzzedDataProvider->ConsumeBool()];
+ }
+ mCameraService->getNumberOfCameras(camType, &mNumCameras);
+}
+
+void CameraFuzzer::getCameraInformation(int32_t cameraId) {
+ String16 cameraIdStr = String16(String8::format("%d", cameraId));
+ bool isSupported = false;
+ mCameraService->supportsCameraApi(
+ cameraIdStr, kCameraApiVersion[mFuzzedDataProvider->ConsumeBool()], &isSupported);
+ mCameraService->isHiddenPhysicalCamera(cameraIdStr, &isSupported);
+
+ String16 parameters;
+ mCameraService->getLegacyParameters(cameraId, &parameters);
+
+ std::vector<hardware::camera2::utils::ConcurrentCameraIdCombination> concurrentCameraIds;
+ mCameraService->getConcurrentCameraIds(&concurrentCameraIds);
+
+ hardware::camera2::params::VendorTagDescriptorCache cache;
+ mCameraService->getCameraVendorTagCache(&cache);
+
+ CameraInfo cameraInfo;
+ mCameraService->getCameraInfo(cameraId, &cameraInfo);
+
+ CameraMetadata metadata;
+ mCameraService->getCameraCharacteristics(cameraIdStr, &metadata);
+}
+
+void CameraFuzzer::invokeCameraSound() {
+ mCameraService->increaseSoundRef();
+ mCameraService->decreaseSoundRef();
+ bool shouldPassInvalidPlaySound = mFuzzedDataProvider->ConsumeBool();
+ bool shouldPassInvalidLockSound = mFuzzedDataProvider->ConsumeBool();
+ android::CameraService::sound_kind playSound, lockSound;
+ if (shouldPassInvalidPlaySound) {
+ playSound = static_cast<android::CameraService::sound_kind>(
+ mFuzzedDataProvider->ConsumeIntegral<size_t>());
+ } else {
+ playSound =
+ kSoundKind[mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(0, kNumSoundKind - 1)];
+ }
+
+ if (shouldPassInvalidLockSound) {
+ lockSound = static_cast<android::CameraService::sound_kind>(
+ mFuzzedDataProvider->ConsumeIntegral<size_t>());
+ } else {
+ lockSound =
+ kSoundKind[mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(0, kNumSoundKind - 1)];
+ }
+ mCameraService->playSound(playSound);
+ mCameraService->loadSoundLocked(lockSound);
+}
+
+void CameraFuzzer::invokeDump() {
+ Vector<String16> args;
+ size_t numberOfLines = mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(0, kMaxNumLines);
+ for (size_t lineIdx = 0; lineIdx < numberOfLines; ++lineIdx) {
+ args.add(static_cast<String16>(mFuzzedDataProvider->ConsumeRandomLengthString().c_str()));
+ }
+ const char *fileName = "logDumpFile";
+ int fd = memfd_create(fileName, MFD_ALLOW_SEALING);
+ mCameraService->dump(fd, args);
+ close(fd);
+}
+
+void CameraFuzzer::invokeShellCommand() {
+ int in = mFuzzedDataProvider->ConsumeIntegral<int>();
+ int out = mFuzzedDataProvider->ConsumeIntegral<int>();
+ int err = mFuzzedDataProvider->ConsumeIntegral<int>();
+ Vector<String16> args;
+ size_t numArgs = mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(kMinArgs, kMaxArgs);
+ for (size_t argsIdx = 0; argsIdx < numArgs; ++argsIdx) {
+ bool shouldPassInvalidCommand = mFuzzedDataProvider->ConsumeBool();
+ if (shouldPassInvalidCommand) {
+ args.add(
+ static_cast<String16>(mFuzzedDataProvider->ConsumeRandomLengthString().c_str()));
+ } else {
+ args.add(kShellCmd[mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(
+ 0, kNumShellCmd - 1)]);
+ }
+ }
+ mCameraService->shellCommand(in, out, err, args);
+}
+
+void CameraFuzzer::invokeNotifyCalls() {
+ mCameraService->notifyMonitoredUids();
+ int64_t newState = mFuzzedDataProvider->ConsumeIntegral<int64_t>();
+ mCameraService->notifyDeviceStateChange(newState);
+ std::vector<int32_t> args;
+ size_t numArgs = mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(kMinArgs, kMaxArgs);
+ for (size_t argsIdx = 0; argsIdx < numArgs; ++argsIdx) {
+ args.push_back(mFuzzedDataProvider->ConsumeIntegral<int32_t>());
+ }
+ bool shouldPassInvalidEvent = mFuzzedDataProvider->ConsumeBool();
+ int32_t eventId;
+ if (shouldPassInvalidEvent) {
+ eventId = mFuzzedDataProvider->ConsumeIntegral<int32_t>();
+ } else {
+ eventId = kEventId[mFuzzedDataProvider->ConsumeBool()];
+ }
+ mCameraService->notifySystemEvent(eventId, args);
+}
+
+void CameraFuzzer::invokeCameraAPIs() {
+ for (int32_t cameraId = 0; cameraId < mNumCameras; ++cameraId) {
+ getCameraInformation(cameraId);
+
+ const String16 opPackageName("com.fuzzer.poc");
+ ::android::binder::Status rc;
+ sp<ICamera> cameraDevice;
+
+ rc = mCameraService->connect(this, cameraId, opPackageName, AID_MEDIA, AID_ROOT,
+ &cameraDevice);
+ if (!rc.isOk()) {
+ // camera not connected
+ return;
+ }
+ if (cameraDevice) {
+ sp<Surface> previewSurface;
+ sp<SurfaceControl> surfaceControl;
+ CameraParameters params(cameraDevice->getParameters());
+ String8 focusModes(params.get(CameraParameters::KEY_SUPPORTED_FOCUS_MODES));
+ bool isAFSupported = false;
+ const char *focusMode = nullptr;
+
+ if (focusModes.contains(CameraParameters::FOCUS_MODE_AUTO)) {
+ isAFSupported = true;
+ } else if (focusModes.contains(CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE)) {
+ isAFSupported = true;
+ focusMode = CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE;
+ } else if (focusModes.contains(CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO)) {
+ isAFSupported = true;
+ focusMode = CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO;
+ } else if (focusModes.contains(CameraParameters::FOCUS_MODE_MACRO)) {
+ isAFSupported = true;
+ focusMode = CameraParameters::FOCUS_MODE_MACRO;
+ }
+ if (nullptr != focusMode) {
+ params.set(CameraParameters::KEY_FOCUS_MODE, focusMode);
+ cameraDevice->setParameters(params.flatten());
+ }
+ int previewWidth, previewHeight;
+ params.getPreviewSize(&previewWidth, &previewHeight);
+
+ mComposerClient = new SurfaceComposerClient;
+ mComposerClient->initCheck();
+
+ bool shouldPassInvalidLayerMetaData = mFuzzedDataProvider->ConsumeBool();
+ int layerMetaData;
+ if (shouldPassInvalidLayerMetaData) {
+ layerMetaData = mFuzzedDataProvider->ConsumeIntegral<int>();
+ } else {
+ layerMetaData = kLayerMetadata[mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(
+ 0, kNumLayerMetaData - 1)];
+ }
+ surfaceControl = mComposerClient->createSurface(
+ String8("Test Surface"), previewWidth, previewHeight,
+ CameraParameters::previewFormatToEnum(params.getPreviewFormat()), layerMetaData);
+
+ if (surfaceControl.get() != nullptr) {
+ SurfaceComposerClient::Transaction{}
+ .setLayer(surfaceControl, 0x7fffffff)
+ .show(surfaceControl)
+ .apply();
+
+ previewSurface = surfaceControl->getSurface();
+ cameraDevice->setPreviewTarget(previewSurface->getIGraphicBufferProducer());
+ }
+ cameraDevice->setPreviewCallbackFlag(CAMERA_FRAME_CALLBACK_FLAG_CAMCORDER);
+
+ Vector<Size> pictureSizes;
+ params.getSupportedPictureSizes(pictureSizes);
+
+ for (size_t i = 0; i < pictureSizes.size(); ++i) {
+ params.setPictureSize(pictureSizes[i].width, pictureSizes[i].height);
+ cameraDevice->setParameters(params.flatten());
+ cameraDevice->startPreview();
+ waitForPreviewStart();
+ cameraDevice->autoFocus();
+ waitForEvent(mAutoFocusLock, mAutoFocusCondition, mAutoFocusMessage);
+ bool shouldPassInvalidCameraMsg = mFuzzedDataProvider->ConsumeBool();
+ int msgType;
+ if (shouldPassInvalidCameraMsg) {
+ msgType = mFuzzedDataProvider->ConsumeIntegral<int>();
+ } else {
+ msgType = kCameraMsg[mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(
+ 0, kNumCameraMsg - 1)];
+ }
+ cameraDevice->takePicture(msgType);
+
+ waitForEvent(mSnapshotLock, mSnapshotCondition, mSnapshotNotification);
+ }
+
+ Vector<Size> videoSizes;
+ params.getSupportedVideoSizes(videoSizes);
+
+ for (size_t i = 0; i < videoSizes.size(); ++i) {
+ params.setVideoSize(videoSizes[i].width, videoSizes[i].height);
+
+ cameraDevice->setParameters(params.flatten());
+ cameraDevice->startPreview();
+ waitForPreviewStart();
+ cameraDevice->setVideoBufferMode(
+ android::hardware::BnCamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE);
+ cameraDevice->setVideoTarget(previewSurface->getIGraphicBufferProducer());
+ cameraDevice->startRecording();
+ cameraDevice->stopRecording();
+ }
+ cameraDevice->stopPreview();
+ cameraDevice->disconnect();
+ }
+ }
+}
+
+void CameraFuzzer::process(const uint8_t *data, size_t size) {
+ mFuzzedDataProvider = new FuzzedDataProvider(data, size);
+ getNumCameras();
+ invokeCameraSound();
+ if (mNumCameras > 0) {
+ invokeCameraAPIs();
+ }
+ invokeDump();
+ invokeShellCommand();
+ invokeNotifyCalls();
+ delete mFuzzedDataProvider;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+ if (size < 1) {
+ return 0;
+ }
+ sp<CameraFuzzer> camerafuzzer = new CameraFuzzer();
+ if (!camerafuzzer) {
+ return 0;
+ }
+ if (camerafuzzer->init()) {
+ camerafuzzer->process(data, size);
+ }
+ return 0;
+}
diff --git a/services/mediametrics/Android.bp b/services/mediametrics/Android.bp
index 8ddd3f0..d11720b 100644
--- a/services/mediametrics/Android.bp
+++ b/services/mediametrics/Android.bp
@@ -30,7 +30,7 @@
"modernize-loop-convert",
"modernize-make-shared",
"modernize-make-unique",
- "modernize-pass-by-value",
+ // "modernize-pass-by-value", // found in TimeMachine.h
"modernize-raw-string-literal",
"modernize-redundant-void-arg",
"modernize-replace-auto-ptr",
@@ -38,13 +38,13 @@
"modernize-return-braced-init-list",
"modernize-shrink-to-fit",
"modernize-unary-static-assert",
- "modernize-use-auto", // debatable - auto can obscure type
+ // "modernize-use-auto", // found in MediaMetricsService.h, debatable - auto can obscure type
"modernize-use-bool-literals",
"modernize-use-default-member-init",
"modernize-use-emplace",
"modernize-use-equals-default",
"modernize-use-equals-delete",
- "modernize-use-nodiscard",
+ // "modernize-use-nodiscard", // found in TimeMachine.h
"modernize-use-noexcept",
"modernize-use-nullptr",
"modernize-use-override",
@@ -57,6 +57,10 @@
// Remove some pedantic stylistic requirements.
"-google-readability-casting", // C++ casts not always necessary and may be verbose
"-google-readability-todo", // do not require TODO(info)
+
+ "-bugprone-unhandled-self-assignment", // found in TimeMachine.h
+ "-bugprone-suspicious-string-compare", // found in TimeMachine.h
+ "-cert-oop54-cpp", // found in TransactionLog.h
]
cc_defaults {
@@ -88,8 +92,7 @@
tidy_checks: tidy_errors,
tidy_checks_as_errors: tidy_errors,
tidy_flags: [
- "-format-style='file'",
- "--header-filter='frameworks/av/services/mediametrics/'",
+ "-format-style=file",
],
}