Merge "transcoding: use libandroid for activity_manager"
diff --git a/Android.bp b/Android.bp
index 327593f..a7cf3e5 100644
--- a/Android.bp
+++ b/Android.bp
@@ -54,4 +54,9 @@
"com.android.media",
"com.android.media.swcodec",
],
+ target: {
+ darwin: {
+ enabled: false,
+ },
+ },
}
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 2d54bd1..5006cd8 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -1957,7 +1957,10 @@
* explicitly set ACAMERA_CONTROL_ZOOM_RATIO, its value defaults to 1.0.</p>
* <p>One limitation of controlling zoom using zoomRatio is that the ACAMERA_SCALER_CROP_REGION
* must only be used for letterboxing or pillarboxing of the sensor active array, and no
- * FREEFORM cropping can be used with ACAMERA_CONTROL_ZOOM_RATIO other than 1.0.</p>
+ * FREEFORM cropping can be used with ACAMERA_CONTROL_ZOOM_RATIO other than 1.0. If
+ * ACAMERA_CONTROL_ZOOM_RATIO is not 1.0, and ACAMERA_SCALER_CROP_REGION is set to be
+ * windowboxing, the camera framework will override the ACAMERA_SCALER_CROP_REGION to be
+ * the active array.</p>
*
* @see ACAMERA_CONTROL_AE_REGIONS
* @see ACAMERA_CONTROL_ZOOM_RATIO
@@ -3651,7 +3654,9 @@
* </ol>
* </li>
* <li>Setting ACAMERA_CONTROL_ZOOM_RATIO to values different than 1.0 and
- * ACAMERA_SCALER_CROP_REGION to be windowboxing at the same time is undefined behavior.</li>
+ * ACAMERA_SCALER_CROP_REGION to be windowboxing at the same time is not supported. In this
+ * case, the camera framework will override the ACAMERA_SCALER_CROP_REGION to be the active
+ * array.</li>
* </ul>
* <p>LEGACY capability devices will only support CENTER_ONLY cropping.</p>
*
diff --git a/include/media/MicrophoneInfo.h b/include/media/MicrophoneInfo.h
index e89401a..a5045b9 100644
--- a/include/media/MicrophoneInfo.h
+++ b/include/media/MicrophoneInfo.h
@@ -208,6 +208,21 @@
int32_t mDirectionality;
};
+// Conversion routines, according to AidlConversion.h conventions.
+inline ConversionResult<MicrophoneInfo>
+aidl2legacy_MicrophoneInfo(const media::MicrophoneInfoData& aidl) {
+ MicrophoneInfo legacy;
+ RETURN_IF_ERROR(legacy.readFromParcelable(aidl));
+ return legacy;
+}
+
+inline ConversionResult<media::MicrophoneInfoData>
+legacy2aidl_MicrophoneInfo(const MicrophoneInfo& legacy) {
+ media::MicrophoneInfoData aidl;
+ RETURN_IF_ERROR(legacy.writeToParcelable(&aidl));
+ return aidl;
+}
+
} // namespace media
} // namespace android
diff --git a/media/codec2/hidl/client/client.cpp b/media/codec2/hidl/client/client.cpp
index 7e4352d..4650672 100644
--- a/media/codec2/hidl/client/client.cpp
+++ b/media/codec2/hidl/client/client.cpp
@@ -843,6 +843,11 @@
return;
}
});
+ if (!transStatus.isOk()) {
+ LOG(DEBUG) << "SimpleParamReflector -- transaction failed: "
+ << transStatus.description();
+ descriptor.reset();
+ }
return descriptor;
}
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index 96f86e8..79c6227 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -1151,14 +1151,11 @@
bool changed = false;
if (domain & mInputDomain) {
- sp<AMessage> oldFormat = mInputFormat;
- mInputFormat = mInputFormat->dup(); // trigger format changed
+ sp<AMessage> oldFormat = mInputFormat->dup();
mInputFormat->extend(getFormatForDomain(reflected, mInputDomain));
if (mInputFormat->countEntries() != oldFormat->countEntries()
|| mInputFormat->changesFrom(oldFormat)->countEntries() > 0) {
changed = true;
- } else {
- mInputFormat = oldFormat; // no change
}
}
if (domain & mOutputDomain) {
diff --git a/media/codec2/vndk/C2AllocatorBlob.cpp b/media/codec2/vndk/C2AllocatorBlob.cpp
index 565137c..6340cba 100644
--- a/media/codec2/vndk/C2AllocatorBlob.cpp
+++ b/media/codec2/vndk/C2AllocatorBlob.cpp
@@ -17,6 +17,8 @@
// #define LOG_NDEBUG 0
#define LOG_TAG "C2AllocatorBlob"
+#include <set>
+
#include <C2AllocatorBlob.h>
#include <C2PlatformSupport.h>
@@ -67,6 +69,10 @@
private:
const std::shared_ptr<C2GraphicAllocation> mGraphicAllocation;
const C2Allocator::id_t mAllocatorId;
+
+ std::mutex mMapLock;
+ std::multiset<std::pair<size_t, size_t>> mMappedOffsetSize;
+ uint8_t *mMappedAddr;
};
C2AllocationBlob::C2AllocationBlob(
@@ -74,20 +80,74 @@
C2Allocator::id_t allocatorId)
: C2LinearAllocation(capacity),
mGraphicAllocation(std::move(graphicAllocation)),
- mAllocatorId(allocatorId) {}
+ mAllocatorId(allocatorId),
+ mMappedAddr(nullptr) {}
-C2AllocationBlob::~C2AllocationBlob() {}
+C2AllocationBlob::~C2AllocationBlob() {
+ if (mMappedAddr) {
+ C2Rect rect(capacity(), kLinearBufferHeight);
+ mGraphicAllocation->unmap(&mMappedAddr, rect, nullptr);
+ }
+}
c2_status_t C2AllocationBlob::map(size_t offset, size_t size, C2MemoryUsage usage,
C2Fence* fence, void** addr /* nonnull */) {
+ *addr = nullptr;
+ if (size > capacity() || offset > capacity() || offset > capacity() - size) {
+ ALOGV("C2AllocationBlob: map: bad offset / size: offset=%zu size=%zu capacity=%u",
+ offset, size, capacity());
+ return C2_BAD_VALUE;
+ }
+ std::unique_lock<std::mutex> lock(mMapLock);
+ if (mMappedAddr) {
+ *addr = mMappedAddr + offset;
+ mMappedOffsetSize.insert({offset, size});
+ ALOGV("C2AllocationBlob: mapped from existing mapping: offset=%zu size=%zu capacity=%u",
+ offset, size, capacity());
+ return C2_OK;
+ }
C2PlanarLayout layout;
- C2Rect rect = C2Rect(size, kLinearBufferHeight).at(offset, 0u);
- return mGraphicAllocation->map(rect, usage, fence, &layout, reinterpret_cast<uint8_t**>(addr));
+ C2Rect rect = C2Rect(capacity(), kLinearBufferHeight);
+ c2_status_t err = mGraphicAllocation->map(rect, usage, fence, &layout, &mMappedAddr);
+ if (err != C2_OK) {
+ ALOGV("C2AllocationBlob: map failed: offset=%zu size=%zu capacity=%u err=%d",
+ offset, size, capacity(), err);
+ mMappedAddr = nullptr;
+ return err;
+ }
+ *addr = mMappedAddr + offset;
+ mMappedOffsetSize.insert({offset, size});
+ ALOGV("C2AllocationBlob: new map succeeded: offset=%zu size=%zu capacity=%u",
+ offset, size, capacity());
+ return C2_OK;
}
c2_status_t C2AllocationBlob::unmap(void* addr, size_t size, C2Fence* fenceFd) {
- C2Rect rect(size, kLinearBufferHeight);
- return mGraphicAllocation->unmap(reinterpret_cast<uint8_t**>(&addr), rect, fenceFd);
+ std::unique_lock<std::mutex> lock(mMapLock);
+ uint8_t *u8Addr = static_cast<uint8_t *>(addr);
+ if (u8Addr < mMappedAddr || mMappedAddr + capacity() < u8Addr + size) {
+ ALOGV("C2AllocationBlob: unmap: Bad addr / size: addr=%p size=%zu capacity=%u",
+ addr, size, capacity());
+ return C2_BAD_VALUE;
+ }
+ auto it = mMappedOffsetSize.find(std::make_pair(u8Addr - mMappedAddr, size));
+ if (it == mMappedOffsetSize.end()) {
+ ALOGV("C2AllocationBlob: unrecognized map: addr=%p size=%zu capacity=%u",
+ addr, size, capacity());
+ return C2_BAD_VALUE;
+ }
+ mMappedOffsetSize.erase(it);
+ if (!mMappedOffsetSize.empty()) {
+ ALOGV("C2AllocationBlob: still maintain mapping: addr=%p size=%zu capacity=%u",
+ addr, size, capacity());
+ return C2_OK;
+ }
+ C2Rect rect(capacity(), kLinearBufferHeight);
+ c2_status_t err = mGraphicAllocation->unmap(&mMappedAddr, rect, fenceFd);
+ ALOGV("C2AllocationBlob: last unmap: addr=%p size=%zu capacity=%u err=%d",
+ addr, size, capacity(), err);
+ mMappedAddr = nullptr;
+ return err;
}
/* ====================================== BLOB ALLOCATOR ====================================== */
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index 8fb836d..496dfc7 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -110,29 +110,6 @@
}
////////////////////////////////////////////////////////////////////////////////////////////////////
-// Utilities for working with AIDL unions.
-// UNION_GET(obj, fieldname) returns a ConversionResult<T> containing either the strongly-typed
-// value of the respective field, or BAD_VALUE if the union is not set to the requested field.
-// UNION_SET(obj, fieldname, value) sets the requested field to the given value.
-
-template<typename T, typename T::Tag tag>
-using UnionFieldType = std::decay_t<decltype(std::declval<T>().template get<tag>())>;
-
-template<typename T, typename T::Tag tag>
-ConversionResult<UnionFieldType<T, tag>> unionGetField(const T& u) {
- if (u.getTag() != tag) {
- return unexpected(BAD_VALUE);
- }
- return u.template get<tag>();
-}
-
-#define UNION_GET(u, field) \
- unionGetField<std::decay_t<decltype(u)>, std::decay_t<decltype(u)>::Tag::field>(u)
-
-#define UNION_SET(u, field, value) \
- (u).set<std::decay_t<decltype(u)>::Tag::field>(value)
-
-////////////////////////////////////////////////////////////////////////////////////////////////////
enum class Direction {
INPUT, OUTPUT
@@ -259,6 +236,14 @@
return convertReinterpret<int32_t>(legacy);
}
+ConversionResult<audio_hw_sync_t> aidl2legacy_int32_t_audio_hw_sync_t(int32_t aidl) {
+ return convertReinterpret<audio_hw_sync_t>(aidl);
+}
+
+ConversionResult<int32_t> legacy2aidl_audio_hw_sync_t_int32_t(audio_hw_sync_t legacy) {
+ return convertReinterpret<int32_t>(legacy);
+}
+
ConversionResult<pid_t> aidl2legacy_int32_t_pid_t(int32_t aidl) {
return convertReinterpret<pid_t>(aidl);
}
@@ -483,7 +468,7 @@
return static_cast<media::audio::common::AudioFormat>(legacy);
}
-ConversionResult<int> aidl2legacy_AudioGainMode_int(media::AudioGainMode aidl) {
+ConversionResult<audio_gain_mode_t> aidl2legacy_AudioGainMode_audio_gain_mode_t(media::AudioGainMode aidl) {
switch (aidl) {
case media::AudioGainMode::JOINT:
return AUDIO_GAIN_MODE_JOINT;
@@ -496,7 +481,7 @@
}
}
-ConversionResult<media::AudioGainMode> legacy2aidl_int_AudioGainMode(int legacy) {
+ConversionResult<media::AudioGainMode> legacy2aidl_audio_gain_mode_t_AudioGainMode(audio_gain_mode_t legacy) {
switch (legacy) {
case AUDIO_GAIN_MODE_JOINT:
return media::AudioGainMode::JOINT;
@@ -509,20 +494,20 @@
}
}
-ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t(int32_t aidl) {
- return convertBitmask<audio_gain_mode_t, int32_t, int, media::AudioGainMode>(
- aidl, aidl2legacy_AudioGainMode_int,
+ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl) {
+ return convertBitmask<audio_gain_mode_t, int32_t, audio_gain_mode_t, media::AudioGainMode>(
+ aidl, aidl2legacy_AudioGainMode_audio_gain_mode_t,
// AudioGainMode is index-based.
index2enum_index<media::AudioGainMode>,
// AUDIO_GAIN_MODE_* constants are mask-based.
- enumToMask_bitmask<audio_gain_mode_t, int>);
+ enumToMask_bitmask<audio_gain_mode_t, audio_gain_mode_t>);
}
-ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t(audio_gain_mode_t legacy) {
- return convertBitmask<int32_t, audio_gain_mode_t, media::AudioGainMode, int>(
- legacy, legacy2aidl_int_AudioGainMode,
+ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_mask_int32_t(audio_gain_mode_t legacy) {
+ return convertBitmask<int32_t, audio_gain_mode_t, media::AudioGainMode, audio_gain_mode_t>(
+ legacy, legacy2aidl_audio_gain_mode_t_AudioGainMode,
// AUDIO_GAIN_MODE_* constants are mask-based.
- index2enum_bitmask<int>,
+ index2enum_bitmask<audio_gain_mode_t>,
// AudioGainMode is index-based.
enumToMask_index<int32_t, media::AudioGainMode>);
}
@@ -541,7 +526,7 @@
const media::AudioGainConfig& aidl, media::AudioPortRole role, media::AudioPortType type) {
audio_gain_config legacy;
legacy.index = VALUE_OR_RETURN(convertIntegral<int>(aidl.index));
- legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t(aidl.mode));
+ legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t_mask(aidl.mode));
legacy.channel_mask =
VALUE_OR_RETURN(aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
const bool isInput = VALUE_OR_RETURN(direction(role, type)) == Direction::INPUT;
@@ -563,7 +548,7 @@
const audio_gain_config& legacy, audio_port_role_t role, audio_port_type_t type) {
media::AudioGainConfig aidl;
aidl.index = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.index));
- aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t(legacy.mode));
+ aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_mask_int32_t(legacy.mode));
aidl.channelMask =
VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
const bool isInput = VALUE_OR_RETURN(direction(role, type)) == Direction::INPUT;
@@ -977,7 +962,7 @@
switch (role) {
case media::AudioPortRole::NONE:
// Just verify that the union is empty.
- VALUE_OR_RETURN(UNION_GET(aidl, nothing));
+ VALUE_OR_RETURN(UNION_GET(aidl, unspecified));
break;
case media::AudioPortRole::SOURCE:
@@ -1004,7 +989,7 @@
switch (role) {
case AUDIO_PORT_ROLE_NONE:
- UNION_SET(aidl, nothing, false);
+ UNION_SET(aidl, unspecified, false);
break;
case AUDIO_PORT_ROLE_SOURCE:
// This is not a bug. A SOURCE role corresponds to the stream field.
@@ -1061,12 +1046,10 @@
const media::AudioPortConfigExt& aidl, media::AudioPortType type,
media::AudioPortRole role) {
audio_port_config_ext legacy;
- // Our way of representing a union in AIDL is to have multiple vectors and require that at most
- // one of the them has size 1 and the rest are empty.
switch (type) {
case media::AudioPortType::NONE:
// Just verify that the union is empty.
- VALUE_OR_RETURN(UNION_GET(aidl, nothing));
+ VALUE_OR_RETURN(UNION_GET(aidl, unspecified));
break;
case media::AudioPortType::DEVICE:
legacy.device = VALUE_OR_RETURN(
@@ -1092,7 +1075,7 @@
switch (type) {
case AUDIO_PORT_TYPE_NONE:
- UNION_SET(aidl, nothing, false);
+ UNION_SET(aidl, unspecified, false);
break;
case AUDIO_PORT_TYPE_DEVICE:
UNION_SET(aidl, device,
@@ -1829,4 +1812,374 @@
enumToMask_index<int32_t, media::AudioEncapsulationMetadataType>);
}
+ConversionResult<audio_mix_latency_class_t>
+aidl2legacy_AudioMixLatencyClass_audio_mix_latency_class_t(
+ media::AudioMixLatencyClass aidl) {
+ switch (aidl) {
+ case media::AudioMixLatencyClass::LOW:
+ return AUDIO_LATENCY_LOW;
+ case media::AudioMixLatencyClass::NORMAL:
+ return AUDIO_LATENCY_NORMAL;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<media::AudioMixLatencyClass>
+legacy2aidl_audio_mix_latency_class_t_AudioMixLatencyClass(
+ audio_mix_latency_class_t legacy) {
+ switch (legacy) {
+ case AUDIO_LATENCY_LOW:
+ return media::AudioMixLatencyClass::LOW;
+ case AUDIO_LATENCY_NORMAL:
+ return media::AudioMixLatencyClass::NORMAL;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_port_device_ext>
+aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(const media::AudioPortDeviceExt& aidl) {
+ audio_port_device_ext legacy;
+ legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
+ legacy.type = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.device.type));
+ RETURN_IF_ERROR(
+ aidl2legacy_string(aidl.device.address, legacy.address, sizeof(legacy.address)));
+ legacy.encapsulation_modes = VALUE_OR_RETURN(
+ aidl2legacy_AudioEncapsulationMode_mask(aidl.encapsulationModes));
+ legacy.encapsulation_metadata_types = VALUE_OR_RETURN(
+ aidl2legacy_AudioEncapsulationMetadataType_mask(aidl.encapsulationMetadataTypes));
+ return legacy;
+}
+
+ConversionResult<media::AudioPortDeviceExt>
+legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(const audio_port_device_ext& legacy) {
+ media::AudioPortDeviceExt aidl;
+ aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+ aidl.device.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.type));
+ aidl.device.address = VALUE_OR_RETURN(
+ legacy2aidl_string(legacy.address, sizeof(legacy.address)));
+ aidl.encapsulationModes = VALUE_OR_RETURN(
+ legacy2aidl_AudioEncapsulationMode_mask(legacy.encapsulation_modes));
+ aidl.encapsulationMetadataTypes = VALUE_OR_RETURN(
+ legacy2aidl_AudioEncapsulationMetadataType_mask(legacy.encapsulation_metadata_types));
+ return aidl;
+}
+
+ConversionResult<audio_port_mix_ext>
+aidl2legacy_AudioPortMixExt_audio_port_mix_ext(const media::AudioPortMixExt& aidl) {
+ audio_port_mix_ext legacy;
+ legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
+ legacy.handle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.handle));
+ legacy.latency_class = VALUE_OR_RETURN(
+ aidl2legacy_AudioMixLatencyClass_audio_mix_latency_class_t(aidl.latencyClass));
+ return legacy;
+}
+
+ConversionResult<media::AudioPortMixExt>
+legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy) {
+ media::AudioPortMixExt aidl;
+ aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+ aidl.handle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
+ aidl.latencyClass = VALUE_OR_RETURN(
+ legacy2aidl_audio_mix_latency_class_t_AudioMixLatencyClass(legacy.latency_class));
+ return aidl;
+}
+
+ConversionResult<audio_port_session_ext>
+aidl2legacy_AudioPortSessionExt_audio_port_session_ext(const media::AudioPortSessionExt& aidl) {
+ audio_port_session_ext legacy;
+ legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl.session));
+ return legacy;
+}
+
+ConversionResult<media::AudioPortSessionExt>
+legacy2aidl_audio_port_session_ext_AudioPortSessionExt(const audio_port_session_ext& legacy) {
+ media::AudioPortSessionExt aidl;
+ aidl.session = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(legacy.session));
+ return aidl;
+}
+
+// This type is unnamed in the original definition, thus we name it here.
+using audio_port_v7_ext = decltype(audio_port_v7::ext);
+
+ConversionResult<audio_port_v7_ext> aidl2legacy_AudioPortExt(
+ const media::AudioPortExt& aidl, media::AudioPortType type) {
+ audio_port_v7_ext legacy;
+ switch (type) {
+ case media::AudioPortType::NONE:
+ // Just verify that the union is empty.
+ VALUE_OR_RETURN(UNION_GET(aidl, unspecified));
+ break;
+ case media::AudioPortType::DEVICE:
+ legacy.device = VALUE_OR_RETURN(
+ aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
+ VALUE_OR_RETURN(UNION_GET(aidl, device))));
+ break;
+ case media::AudioPortType::MIX:
+ legacy.mix = VALUE_OR_RETURN(
+ aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
+ VALUE_OR_RETURN(UNION_GET(aidl, mix))));
+ break;
+ case media::AudioPortType::SESSION:
+ legacy.session = VALUE_OR_RETURN(aidl2legacy_AudioPortSessionExt_audio_port_session_ext(
+ VALUE_OR_RETURN(UNION_GET(aidl, session))));
+ break;
+ default:
+ LOG_ALWAYS_FATAL("Shouldn't get here");
+ }
+ return legacy;
+}
+
+ConversionResult<media::AudioPortExt> legacy2aidl_AudioPortExt(
+ const audio_port_v7_ext& legacy, audio_port_type_t type) {
+ media::AudioPortExt aidl;
+ switch (type) {
+ case AUDIO_PORT_TYPE_NONE:
+ UNION_SET(aidl, unspecified, false);
+ break;
+ case AUDIO_PORT_TYPE_DEVICE:
+ UNION_SET(aidl, device,
+ VALUE_OR_RETURN(
+ legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(legacy.device)));
+ break;
+ case AUDIO_PORT_TYPE_MIX:
+ UNION_SET(aidl, mix,
+ VALUE_OR_RETURN(legacy2aidl_audio_port_mix_ext_AudioPortMixExt(legacy.mix)));
+ break;
+ case AUDIO_PORT_TYPE_SESSION:
+ UNION_SET(aidl, session,
+ VALUE_OR_RETURN(legacy2aidl_audio_port_session_ext_AudioPortSessionExt(
+ legacy.session)));
+ break;
+ default:
+ LOG_ALWAYS_FATAL("Shouldn't get here");
+ }
+ return aidl;
+}
+
+ConversionResult<audio_profile>
+aidl2legacy_AudioProfile_audio_profile(const media::AudioProfile& aidl) {
+ audio_profile legacy;
+ legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+
+ if (aidl.samplingRates.size() > std::size(legacy.sample_rates)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(
+ convertRange(aidl.samplingRates.begin(), aidl.samplingRates.end(), legacy.sample_rates,
+ convertIntegral<int32_t, unsigned int>));
+ legacy.num_sample_rates = aidl.samplingRates.size();
+
+ if (aidl.channelMasks.size() > std::size(legacy.channel_masks)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(
+ convertRange(aidl.channelMasks.begin(), aidl.channelMasks.end(), legacy.channel_masks,
+ aidl2legacy_int32_t_audio_channel_mask_t));
+ legacy.num_channel_masks = aidl.channelMasks.size();
+ return legacy;
+}
+
+ConversionResult<media::AudioProfile>
+legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy) {
+ media::AudioProfile aidl;
+ aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+
+ if (legacy.num_sample_rates > std::size(legacy.sample_rates)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(
+ convertRange(legacy.sample_rates, legacy.sample_rates + legacy.num_sample_rates,
+ std::back_inserter(aidl.samplingRates),
+ convertIntegral<unsigned int, int32_t>));
+
+ if (legacy.num_channel_masks > std::size(legacy.channel_masks)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(
+ convertRange(legacy.channel_masks, legacy.channel_masks + legacy.num_channel_masks,
+ std::back_inserter(aidl.channelMasks),
+ legacy2aidl_audio_channel_mask_t_int32_t));
+ return aidl;
+}
+
+ConversionResult<audio_gain>
+aidl2legacy_AudioGain_audio_gain(const media::AudioGain& aidl) {
+ audio_gain legacy;
+ legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t_mask(aidl.mode));
+ legacy.channel_mask = VALUE_OR_RETURN(
+ aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
+ legacy.min_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.minValue));
+ legacy.max_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.maxValue));
+ legacy.default_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.defaultValue));
+ legacy.step_value = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.stepValue));
+ legacy.min_ramp_ms = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.minRampMs));
+ legacy.max_ramp_ms = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.maxRampMs));
+ return legacy;
+}
+
+ConversionResult<media::AudioGain>
+legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy) {
+ media::AudioGain aidl;
+ aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_mask_int32_t(legacy.mode));
+ aidl.channelMask = VALUE_OR_RETURN(
+ legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
+ aidl.minValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.min_value));
+ aidl.maxValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.max_value));
+ aidl.defaultValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.default_value));
+ aidl.stepValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.step_value));
+ aidl.minRampMs = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.min_ramp_ms));
+ aidl.maxRampMs = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.max_ramp_ms));
+ return aidl;
+}
+
+ConversionResult<audio_port_v7>
+aidl2legacy_AudioPort_audio_port_v7(const media::AudioPort& aidl) {
+ audio_port_v7 legacy;
+ legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.id));
+ legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.role));
+ legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.type));
+ RETURN_IF_ERROR(aidl2legacy_string(aidl.name, legacy.name, sizeof(legacy.name)));
+
+ if (aidl.profiles.size() > std::size(legacy.audio_profiles)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(convertRange(aidl.profiles.begin(), aidl.profiles.end(), legacy.audio_profiles,
+ aidl2legacy_AudioProfile_audio_profile));
+ legacy.num_audio_profiles = aidl.profiles.size();
+
+ if (aidl.gains.size() > std::size(legacy.gains)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(convertRange(aidl.gains.begin(), aidl.gains.end(), legacy.gains,
+ aidl2legacy_AudioGain_audio_gain));
+ legacy.num_gains = aidl.gains.size();
+
+ legacy.active_config = VALUE_OR_RETURN(
+ aidl2legacy_AudioPortConfig_audio_port_config(aidl.activeConfig));
+ legacy.ext = VALUE_OR_RETURN(aidl2legacy_AudioPortExt(aidl.ext, aidl.type));
+ return legacy;
+}
+
+ConversionResult<media::AudioPort>
+legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy) {
+ media::AudioPort aidl;
+ aidl.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
+ aidl.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
+ aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
+ aidl.name = VALUE_OR_RETURN(legacy2aidl_string(legacy.name, sizeof(legacy.name)));
+
+ if (legacy.num_audio_profiles > std::size(legacy.audio_profiles)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(
+ convertRange(legacy.audio_profiles, legacy.audio_profiles + legacy.num_audio_profiles,
+ std::back_inserter(aidl.profiles),
+ legacy2aidl_audio_profile_AudioProfile));
+
+ if (legacy.num_gains > std::size(legacy.gains)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(
+ convertRange(legacy.gains, legacy.gains + legacy.num_gains,
+ std::back_inserter(aidl.gains),
+ legacy2aidl_audio_gain_AudioGain));
+
+ aidl.activeConfig = VALUE_OR_RETURN(
+ legacy2aidl_audio_port_config_AudioPortConfig(legacy.active_config));
+ aidl.ext = VALUE_OR_RETURN(legacy2aidl_AudioPortExt(legacy.ext, legacy.type));
+ return aidl;
+}
+
+ConversionResult<audio_mode_t>
+aidl2legacy_AudioMode_audio_mode_t(media::AudioMode aidl) {
+ switch (aidl) {
+ case media::AudioMode::INVALID:
+ return AUDIO_MODE_INVALID;
+ case media::AudioMode::CURRENT:
+ return AUDIO_MODE_CURRENT;
+ case media::AudioMode::NORMAL:
+ return AUDIO_MODE_NORMAL;
+ case media::AudioMode::RINGTONE:
+ return AUDIO_MODE_RINGTONE;
+ case media::AudioMode::IN_CALL:
+ return AUDIO_MODE_IN_CALL;
+ case media::AudioMode::IN_COMMUNICATION:
+ return AUDIO_MODE_IN_COMMUNICATION;
+ case media::AudioMode::CALL_SCREEN:
+ return AUDIO_MODE_CALL_SCREEN;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<media::AudioMode>
+legacy2aidl_audio_mode_t_AudioMode(audio_mode_t legacy) {
+ switch (legacy) {
+ case AUDIO_MODE_INVALID:
+ return media::AudioMode::INVALID;
+ case AUDIO_MODE_CURRENT:
+ return media::AudioMode::CURRENT;
+ case AUDIO_MODE_NORMAL:
+ return media::AudioMode::NORMAL;
+ case AUDIO_MODE_RINGTONE:
+ return media::AudioMode::RINGTONE;
+ case AUDIO_MODE_IN_CALL:
+ return media::AudioMode::IN_CALL;
+ case AUDIO_MODE_IN_COMMUNICATION:
+ return media::AudioMode::IN_COMMUNICATION;
+ case AUDIO_MODE_CALL_SCREEN:
+ return media::AudioMode::CALL_SCREEN;
+ case AUDIO_MODE_CNT:
+ break;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_unique_id_use_t>
+aidl2legacy_AudioUniqueIdUse_audio_unique_id_use_t(media::AudioUniqueIdUse aidl) {
+ switch (aidl) {
+ case media::AudioUniqueIdUse::UNSPECIFIED:
+ return AUDIO_UNIQUE_ID_USE_UNSPECIFIED;
+ case media::AudioUniqueIdUse::SESSION:
+ return AUDIO_UNIQUE_ID_USE_SESSION;
+ case media::AudioUniqueIdUse::MODULE:
+ return AUDIO_UNIQUE_ID_USE_MODULE;
+ case media::AudioUniqueIdUse::EFFECT:
+ return AUDIO_UNIQUE_ID_USE_EFFECT;
+ case media::AudioUniqueIdUse::PATCH:
+ return AUDIO_UNIQUE_ID_USE_PATCH;
+ case media::AudioUniqueIdUse::OUTPUT:
+ return AUDIO_UNIQUE_ID_USE_OUTPUT;
+ case media::AudioUniqueIdUse::INPUT:
+ return AUDIO_UNIQUE_ID_USE_INPUT;
+ case media::AudioUniqueIdUse::CLIENT:
+ return AUDIO_UNIQUE_ID_USE_CLIENT;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<media::AudioUniqueIdUse>
+legacy2aidl_audio_unique_id_use_t_AudioUniqueIdUse(audio_unique_id_use_t legacy) {
+ switch (legacy) {
+ case AUDIO_UNIQUE_ID_USE_UNSPECIFIED:
+ return media::AudioUniqueIdUse::UNSPECIFIED;
+ case AUDIO_UNIQUE_ID_USE_SESSION:
+ return media::AudioUniqueIdUse::SESSION;
+ case AUDIO_UNIQUE_ID_USE_MODULE:
+ return media::AudioUniqueIdUse::MODULE;
+ case AUDIO_UNIQUE_ID_USE_EFFECT:
+ return media::AudioUniqueIdUse::EFFECT;
+ case AUDIO_UNIQUE_ID_USE_PATCH:
+ return media::AudioUniqueIdUse::PATCH;
+ case AUDIO_UNIQUE_ID_USE_OUTPUT:
+ return media::AudioUniqueIdUse::OUTPUT;
+ case AUDIO_UNIQUE_ID_USE_INPUT:
+ return media::AudioUniqueIdUse::INPUT;
+ case AUDIO_UNIQUE_ID_USE_CLIENT:
+ return media::AudioUniqueIdUse::CLIENT;
+ case AUDIO_UNIQUE_ID_USE_MAX:
+ break;
+ }
+ return unexpected(BAD_VALUE);
+}
+
} // namespace android
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 299df96..64ec145 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -176,6 +176,11 @@
"com.android.media",
"com.android.media.swcodec",
],
+ target: {
+ darwin: {
+ enabled: false,
+ },
+ },
}
cc_library {
@@ -218,6 +223,11 @@
"signed-integer-overflow",
],
},
+ target: {
+ darwin: {
+ enabled: false,
+ },
+ },
}
// AIDL interface between libaudioclient and framework.jar
@@ -278,6 +288,7 @@
"aidl/android/media/AudioIoConfigEvent.aidl",
"aidl/android/media/AudioIoDescriptor.aidl",
"aidl/android/media/AudioIoFlags.aidl",
+ "aidl/android/media/AudioMixLatencyClass.aidl",
"aidl/android/media/AudioMode.aidl",
"aidl/android/media/AudioOffloadInfo.aidl",
"aidl/android/media/AudioOutputFlags.aidl",
@@ -290,7 +301,11 @@
"aidl/android/media/AudioPortConfigMixExt.aidl",
"aidl/android/media/AudioPortConfigMixExtUseCase.aidl",
"aidl/android/media/AudioPortConfigSessionExt.aidl",
+ "aidl/android/media/AudioPortDeviceExt.aidl",
+ "aidl/android/media/AudioPortExt.aidl",
+ "aidl/android/media/AudioPortMixExt.aidl",
"aidl/android/media/AudioPortRole.aidl",
+ "aidl/android/media/AudioPortSessionExt.aidl",
"aidl/android/media/AudioPortType.aidl",
"aidl/android/media/AudioProfile.aidl",
"aidl/android/media/AudioSourceType.aidl",
@@ -299,7 +314,6 @@
"aidl/android/media/AudioUniqueIdUse.aidl",
"aidl/android/media/AudioUsage.aidl",
"aidl/android/media/AudioUuid.aidl",
- "aidl/android/media/DeviceDescriptorBase.aidl",
"aidl/android/media/EffectDescriptor.aidl",
],
imports: [
@@ -333,7 +347,9 @@
"aidl/android/media/OpenInputResponse.aidl",
"aidl/android/media/OpenOutputRequest.aidl",
"aidl/android/media/OpenOutputResponse.aidl",
+ "aidl/android/media/RenderPosition.aidl",
+ "aidl/android/media/IAudioFlingerService.aidl",
"aidl/android/media/IAudioFlingerClient.aidl",
"aidl/android/media/IAudioRecord.aidl",
"aidl/android/media/IAudioTrack.aidl",
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index d163feb..72c65c1 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -71,7 +71,7 @@
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder;
do {
- binder = sm->getService(String16("media.audio_flinger"));
+ binder = sm->getService(String16(IAudioFlinger::DEFAULT_SERVICE_NAME));
if (binder != 0)
break;
ALOGW("AudioFlinger not published, waiting...");
@@ -83,7 +83,8 @@
reportNoError = true;
}
binder->linkToDeath(gAudioFlingerClient);
- gAudioFlinger = interface_cast<IAudioFlinger>(binder);
+ gAudioFlinger = new AudioFlingerClientAdapter(
+ interface_cast<media::IAudioFlingerService>(binder));
LOG_ALWAYS_FATAL_IF(gAudioFlinger == 0);
afc = gAudioFlingerClient;
// Make sure callbacks can be received by gAudioFlingerClient
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 5dfda09..a7cca45 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -24,79 +24,33 @@
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
-#include <media/AudioValidator.h>
-#include <media/IAudioPolicyService.h>
-#include <mediautils/ServiceUtilities.h>
-#include <mediautils/TimeCheck.h>
#include "IAudioFlinger.h"
namespace android {
-enum {
- CREATE_TRACK = IBinder::FIRST_CALL_TRANSACTION,
- CREATE_RECORD,
- SAMPLE_RATE,
- RESERVED, // obsolete, was CHANNEL_COUNT
- FORMAT,
- FRAME_COUNT,
- LATENCY,
- SET_MASTER_VOLUME,
- SET_MASTER_MUTE,
- MASTER_VOLUME,
- MASTER_MUTE,
- SET_STREAM_VOLUME,
- SET_STREAM_MUTE,
- STREAM_VOLUME,
- STREAM_MUTE,
- SET_MODE,
- SET_MIC_MUTE,
- GET_MIC_MUTE,
- SET_RECORD_SILENCED,
- SET_PARAMETERS,
- GET_PARAMETERS,
- REGISTER_CLIENT,
- GET_INPUTBUFFERSIZE,
- OPEN_OUTPUT,
- OPEN_DUPLICATE_OUTPUT,
- CLOSE_OUTPUT,
- SUSPEND_OUTPUT,
- RESTORE_OUTPUT,
- OPEN_INPUT,
- CLOSE_INPUT,
- INVALIDATE_STREAM,
- SET_VOICE_VOLUME,
- GET_RENDER_POSITION,
- GET_INPUT_FRAMES_LOST,
- NEW_AUDIO_UNIQUE_ID,
- ACQUIRE_AUDIO_SESSION_ID,
- RELEASE_AUDIO_SESSION_ID,
- QUERY_NUM_EFFECTS,
- QUERY_EFFECT,
- GET_EFFECT_DESCRIPTOR,
- CREATE_EFFECT,
- MOVE_EFFECTS,
- LOAD_HW_MODULE,
- GET_PRIMARY_OUTPUT_SAMPLING_RATE,
- GET_PRIMARY_OUTPUT_FRAME_COUNT,
- SET_LOW_RAM_DEVICE,
- LIST_AUDIO_PORTS,
- GET_AUDIO_PORT,
- CREATE_AUDIO_PATCH,
- RELEASE_AUDIO_PATCH,
- LIST_AUDIO_PATCHES,
- SET_AUDIO_PORT_CONFIG,
- GET_AUDIO_HW_SYNC_FOR_SESSION,
- SYSTEM_READY,
- FRAME_COUNT_HAL,
- GET_MICROPHONES,
- SET_MASTER_BALANCE,
- GET_MASTER_BALANCE,
- SET_EFFECT_SUSPENDED,
- SET_AUDIO_HAL_PIDS
-};
+using binder::Status;
#define MAX_ITEMS_PER_LIST 1024
+#define VALUE_OR_RETURN_BINDER(x) \
+ ({ \
+ auto _tmp = (x); \
+ if (!_tmp.ok()) return Status::fromStatusT(_tmp.error()); \
+ std::move(_tmp.value()); \
+ })
+
+#define RETURN_STATUS_IF_ERROR(x) \
+ { \
+ auto _tmp = (x); \
+ if (_tmp != OK) return _tmp; \
+ }
+
+#define RETURN_BINDER_IF_ERROR(x) \
+ { \
+ auto _tmp = (x); \
+ if (_tmp != OK) return Status::fromStatusT(_tmp); \
+ }
+
ConversionResult<media::CreateTrackRequest> IAudioFlinger::CreateTrackInput::toAidl() const {
media::CreateTrackRequest aidl;
aidl.attr = VALUE_OR_RETURN(legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
@@ -252,1387 +206,960 @@
return legacy;
}
-class BpAudioFlinger : public BpInterface<IAudioFlinger>
-{
-public:
- explicit BpAudioFlinger(const sp<IBinder>& impl)
- : BpInterface<IAudioFlinger>(impl)
- {
- }
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// AudioFlingerClientAdapter
- virtual status_t createTrack(const media::CreateTrackRequest& input,
- media::CreateTrackResponse& output)
- {
- Parcel data, reply;
- status_t status;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeParcelable(input);
+AudioFlingerClientAdapter::AudioFlingerClientAdapter(
+ const sp<media::IAudioFlingerService> delegate) : mDelegate(delegate) {}
- status_t lStatus = remote()->transact(CREATE_TRACK, data, &reply);
- if (lStatus != NO_ERROR) {
- ALOGE("createTrack transaction error %d", lStatus);
- return DEAD_OBJECT;
- }
- status = reply.readInt32();
- if (status != NO_ERROR) {
- ALOGE("createTrack returned error %d", status);
- return status;
- }
- output.readFromParcel(&reply);
- if (output.audioTrack == 0) {
- ALOGE("createTrack returned an NULL IAudioTrack with status OK");
- return DEAD_OBJECT;
- }
- return OK;
- }
-
- virtual status_t createRecord(const media::CreateRecordRequest& input,
- media::CreateRecordResponse& output)
- {
- Parcel data, reply;
- status_t status;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-
- data.writeParcelable(input);
-
- status_t lStatus = remote()->transact(CREATE_RECORD, data, &reply);
- if (lStatus != NO_ERROR) {
- ALOGE("createRecord transaction error %d", lStatus);
- return DEAD_OBJECT;
- }
- status = reply.readInt32();
- if (status != NO_ERROR) {
- ALOGE("createRecord returned error %d", status);
- return status;
- }
-
- output.readFromParcel(&reply);
- if (output.audioRecord == 0) {
- ALOGE("createRecord returned a NULL IAudioRecord with status OK");
- return DEAD_OBJECT;
- }
- return OK;
- }
-
- virtual uint32_t sampleRate(audio_io_handle_t ioHandle) const
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) ioHandle);
- remote()->transact(SAMPLE_RATE, data, &reply);
- return reply.readInt32();
- }
-
- // RESERVED for channelCount()
-
- virtual audio_format_t format(audio_io_handle_t output) const
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) output);
- remote()->transact(FORMAT, data, &reply);
- return (audio_format_t) reply.readInt32();
- }
-
- virtual size_t frameCount(audio_io_handle_t ioHandle) const
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) ioHandle);
- remote()->transact(FRAME_COUNT, data, &reply);
- return reply.readInt64();
- }
-
- virtual uint32_t latency(audio_io_handle_t output) const
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) output);
- remote()->transact(LATENCY, data, &reply);
- return reply.readInt32();
- }
-
- virtual status_t setMasterVolume(float value)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeFloat(value);
- remote()->transact(SET_MASTER_VOLUME, data, &reply);
- return reply.readInt32();
- }
-
- virtual status_t setMasterMute(bool muted)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(muted);
- remote()->transact(SET_MASTER_MUTE, data, &reply);
- return reply.readInt32();
- }
-
- virtual float masterVolume() const
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- remote()->transact(MASTER_VOLUME, data, &reply);
- return reply.readFloat();
- }
-
- virtual bool masterMute() const
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- remote()->transact(MASTER_MUTE, data, &reply);
- return reply.readInt32();
- }
-
- status_t setMasterBalance(float balance) override
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeFloat(balance);
- status_t status = remote()->transact(SET_MASTER_BALANCE, data, &reply);
- if (status != NO_ERROR) {
- return status;
- }
- return reply.readInt32();
- }
-
- status_t getMasterBalance(float *balance) const override
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- status_t status = remote()->transact(GET_MASTER_BALANCE, data, &reply);
- if (status != NO_ERROR) {
- return status;
- }
- status = (status_t)reply.readInt32();
- if (status != NO_ERROR) {
- return status;
- }
- *balance = reply.readFloat();
- return NO_ERROR;
- }
-
- virtual status_t setStreamVolume(audio_stream_type_t stream, float value,
- audio_io_handle_t output)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) stream);
- data.writeFloat(value);
- data.writeInt32((int32_t) output);
- remote()->transact(SET_STREAM_VOLUME, data, &reply);
- return reply.readInt32();
- }
-
- virtual status_t setStreamMute(audio_stream_type_t stream, bool muted)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) stream);
- data.writeInt32(muted);
- remote()->transact(SET_STREAM_MUTE, data, &reply);
- return reply.readInt32();
- }
-
- virtual float streamVolume(audio_stream_type_t stream, audio_io_handle_t output) const
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) stream);
- data.writeInt32((int32_t) output);
- remote()->transact(STREAM_VOLUME, data, &reply);
- return reply.readFloat();
- }
-
- virtual bool streamMute(audio_stream_type_t stream) const
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) stream);
- remote()->transact(STREAM_MUTE, data, &reply);
- return reply.readInt32();
- }
-
- virtual status_t setMode(audio_mode_t mode)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(mode);
- remote()->transact(SET_MODE, data, &reply);
- return reply.readInt32();
- }
-
- virtual status_t setMicMute(bool state)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(state);
- remote()->transact(SET_MIC_MUTE, data, &reply);
- return reply.readInt32();
- }
-
- virtual bool getMicMute() const
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- remote()->transact(GET_MIC_MUTE, data, &reply);
- return reply.readInt32();
- }
-
- virtual void setRecordSilenced(audio_port_handle_t portId, bool silenced)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(portId);
- data.writeInt32(silenced ? 1 : 0);
- remote()->transact(SET_RECORD_SILENCED, data, &reply);
- }
-
- virtual status_t setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) ioHandle);
- data.writeString8(keyValuePairs);
- remote()->transact(SET_PARAMETERS, data, &reply);
- return reply.readInt32();
- }
-
- virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) const
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) ioHandle);
- data.writeString8(keys);
- remote()->transact(GET_PARAMETERS, data, &reply);
- return reply.readString8();
- }
-
- virtual void registerClient(const sp<media::IAudioFlingerClient>& client)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeStrongBinder(IInterface::asBinder(client));
- remote()->transact(REGISTER_CLIENT, data, &reply);
- }
-
- virtual size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
- audio_channel_mask_t channelMask) const
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(sampleRate);
- data.writeInt32(format);
- data.writeInt32(channelMask);
- remote()->transact(GET_INPUTBUFFERSIZE, data, &reply);
- return reply.readInt64();
- }
-
- virtual status_t openOutput(const media::OpenOutputRequest& request,
- media::OpenOutputResponse* response)
- {
- status_t status;
- Parcel data, reply;
- return data.writeParcelable(request)
- ?: remote()->transact(OPEN_OUTPUT, data, &reply)
- ?: data.readInt32(&status)
- ?: status
- ?: data.readParcelable(response);
- }
-
- virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
- audio_io_handle_t output2)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) output1);
- data.writeInt32((int32_t) output2);
- remote()->transact(OPEN_DUPLICATE_OUTPUT, data, &reply);
- return (audio_io_handle_t) reply.readInt32();
- }
-
- virtual status_t closeOutput(audio_io_handle_t output)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) output);
- remote()->transact(CLOSE_OUTPUT, data, &reply);
- return reply.readInt32();
- }
-
- virtual status_t suspendOutput(audio_io_handle_t output)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) output);
- remote()->transact(SUSPEND_OUTPUT, data, &reply);
- return reply.readInt32();
- }
-
- virtual status_t restoreOutput(audio_io_handle_t output)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) output);
- remote()->transact(RESTORE_OUTPUT, data, &reply);
- return reply.readInt32();
- }
-
- virtual status_t openInput(const media::OpenInputRequest& request,
- media::OpenInputResponse* response)
- {
- Parcel data, reply;
- status_t status;
- return data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor())
- ?: data.writeParcelable(request)
- ?: remote()->transact(OPEN_INPUT, data, &reply)
- ?: reply.readInt32(&status)
- ?: status
- ?: reply.readParcelable(response);
- }
-
- virtual status_t closeInput(int input)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(input);
- remote()->transact(CLOSE_INPUT, data, &reply);
- return reply.readInt32();
- }
-
- virtual status_t invalidateStream(audio_stream_type_t stream)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) stream);
- remote()->transact(INVALIDATE_STREAM, data, &reply);
- return reply.readInt32();
- }
-
- virtual status_t setVoiceVolume(float volume)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeFloat(volume);
- remote()->transact(SET_VOICE_VOLUME, data, &reply);
- return reply.readInt32();
- }
-
- virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
- audio_io_handle_t output) const
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) output);
- remote()->transact(GET_RENDER_POSITION, data, &reply);
- status_t status = reply.readInt32();
- if (status == NO_ERROR) {
- uint32_t tmp = reply.readInt32();
- if (halFrames != NULL) {
- *halFrames = tmp;
- }
- tmp = reply.readInt32();
- if (dspFrames != NULL) {
- *dspFrames = tmp;
- }
- }
- return status;
- }
-
- virtual uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) ioHandle);
- status_t status = remote()->transact(GET_INPUT_FRAMES_LOST, data, &reply);
- if (status != NO_ERROR) {
- return 0;
- }
- return (uint32_t) reply.readInt32();
- }
-
- virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) use);
- status_t status = remote()->transact(NEW_AUDIO_UNIQUE_ID, data, &reply);
- audio_unique_id_t id = AUDIO_UNIQUE_ID_ALLOCATE;
- if (status == NO_ERROR) {
- id = reply.readInt32();
- }
- return id;
- }
-
- void acquireAudioSessionId(audio_session_t audioSession, pid_t pid, uid_t uid) override
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(audioSession);
- data.writeInt32((int32_t)pid);
- data.writeInt32((int32_t)uid);
- remote()->transact(ACQUIRE_AUDIO_SESSION_ID, data, &reply);
- }
-
- virtual void releaseAudioSessionId(audio_session_t audioSession, int pid)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(audioSession);
- data.writeInt32(pid);
- remote()->transact(RELEASE_AUDIO_SESSION_ID, data, &reply);
- }
-
- virtual status_t queryNumberEffects(uint32_t *numEffects) const
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- status_t status = remote()->transact(QUERY_NUM_EFFECTS, data, &reply);
- if (status != NO_ERROR) {
- return status;
- }
- status = reply.readInt32();
- if (status != NO_ERROR) {
- return status;
- }
- if (numEffects != NULL) {
- *numEffects = (uint32_t)reply.readInt32();
- }
- return NO_ERROR;
- }
-
- virtual status_t queryEffect(uint32_t index, effect_descriptor_t *pDescriptor) const
- {
- if (pDescriptor == NULL) {
- return BAD_VALUE;
- }
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(index);
- status_t status = remote()->transact(QUERY_EFFECT, data, &reply);
- if (status != NO_ERROR) {
- return status;
- }
- status = reply.readInt32();
- if (status != NO_ERROR) {
- return status;
- }
- reply.read(pDescriptor, sizeof(effect_descriptor_t));
- return NO_ERROR;
- }
-
- virtual status_t getEffectDescriptor(const effect_uuid_t *pUuid,
- const effect_uuid_t *pType,
- uint32_t preferredTypeFlag,
- effect_descriptor_t *pDescriptor) const
- {
- if (pUuid == NULL || pType == NULL || pDescriptor == NULL) {
- return BAD_VALUE;
- }
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.write(pUuid, sizeof(effect_uuid_t));
- data.write(pType, sizeof(effect_uuid_t));
- data.writeUint32(preferredTypeFlag);
- status_t status = remote()->transact(GET_EFFECT_DESCRIPTOR, data, &reply);
- if (status != NO_ERROR) {
- return status;
- }
- status = reply.readInt32();
- if (status != NO_ERROR) {
- return status;
- }
- reply.read(pDescriptor, sizeof(effect_descriptor_t));
- return NO_ERROR;
- }
-
- virtual status_t createEffect(const media::CreateEffectRequest& request,
- media::CreateEffectResponse* response)
- {
- Parcel data, reply;
- sp<media::IEffect> effect;
- if (response == nullptr) {
- return BAD_VALUE;
- }
- status_t status;
- status_t lStatus = data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor())
- ?: data.writeParcelable(request)
- ?: remote()->transact(CREATE_EFFECT, data, &reply)
- ?: reply.readInt32(&status)
- ?: reply.readParcelable(response)
- ?: status;
- if (lStatus != NO_ERROR) {
- ALOGE("createEffect error: %s", strerror(-lStatus));
- }
- return lStatus;
- }
-
- virtual status_t moveEffects(audio_session_t session, audio_io_handle_t srcOutput,
- audio_io_handle_t dstOutput)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(session);
- data.writeInt32((int32_t) srcOutput);
- data.writeInt32((int32_t) dstOutput);
- remote()->transact(MOVE_EFFECTS, data, &reply);
- return reply.readInt32();
- }
-
- virtual void setEffectSuspended(int effectId,
- audio_session_t sessionId,
- bool suspended)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(effectId);
- data.writeInt32(sessionId);
- data.writeInt32(suspended ? 1 : 0);
- remote()->transact(SET_EFFECT_SUSPENDED, data, &reply);
- }
-
- virtual audio_module_handle_t loadHwModule(const char *name)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeCString(name);
- remote()->transact(LOAD_HW_MODULE, data, &reply);
- return (audio_module_handle_t) reply.readInt32();
- }
-
- virtual uint32_t getPrimaryOutputSamplingRate()
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- remote()->transact(GET_PRIMARY_OUTPUT_SAMPLING_RATE, data, &reply);
- return reply.readInt32();
- }
-
- virtual size_t getPrimaryOutputFrameCount()
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- remote()->transact(GET_PRIMARY_OUTPUT_FRAME_COUNT, data, &reply);
- return reply.readInt64();
- }
-
- virtual status_t setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) override
- {
- Parcel data, reply;
-
- static_assert(NO_ERROR == 0, "NO_ERROR must be 0");
- return data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor())
- ?: data.writeInt32((int) isLowRamDevice)
- ?: data.writeInt64(totalMemory)
- ?: remote()->transact(SET_LOW_RAM_DEVICE, data, &reply)
- ?: reply.readInt32();
- }
-
- virtual status_t listAudioPorts(unsigned int *num_ports,
- struct audio_port *ports)
- {
- if (num_ports == NULL || *num_ports == 0 || ports == NULL) {
- return BAD_VALUE;
- }
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(*num_ports);
- status_t status = remote()->transact(LIST_AUDIO_PORTS, data, &reply);
- if (status != NO_ERROR ||
- (status = (status_t)reply.readInt32()) != NO_ERROR) {
- return status;
- }
- *num_ports = (unsigned int)reply.readInt32();
- reply.read(ports, *num_ports * sizeof(struct audio_port));
- return status;
- }
- virtual status_t getAudioPort(struct audio_port_v7 *port)
- {
- if (port == nullptr) {
- return BAD_VALUE;
- }
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.write(port, sizeof(struct audio_port_v7));
- status_t status = remote()->transact(GET_AUDIO_PORT, data, &reply);
- if (status != NO_ERROR ||
- (status = (status_t)reply.readInt32()) != NO_ERROR) {
- return status;
- }
- reply.read(port, sizeof(struct audio_port));
- return status;
- }
- virtual status_t createAudioPatch(const struct audio_patch *patch,
- audio_patch_handle_t *handle)
- {
- if (patch == NULL || handle == NULL) {
- return BAD_VALUE;
- }
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.write(patch, sizeof(struct audio_patch));
- data.write(handle, sizeof(audio_patch_handle_t));
- status_t status = remote()->transact(CREATE_AUDIO_PATCH, data, &reply);
- if (status != NO_ERROR ||
- (status = (status_t)reply.readInt32()) != NO_ERROR) {
- return status;
- }
- reply.read(handle, sizeof(audio_patch_handle_t));
- return status;
- }
- virtual status_t releaseAudioPatch(audio_patch_handle_t handle)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.write(&handle, sizeof(audio_patch_handle_t));
- status_t status = remote()->transact(RELEASE_AUDIO_PATCH, data, &reply);
- if (status != NO_ERROR) {
- status = (status_t)reply.readInt32();
- }
- return status;
- }
- virtual status_t listAudioPatches(unsigned int *num_patches,
- struct audio_patch *patches)
- {
- if (num_patches == NULL || *num_patches == 0 || patches == NULL) {
- return BAD_VALUE;
- }
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(*num_patches);
- status_t status = remote()->transact(LIST_AUDIO_PATCHES, data, &reply);
- if (status != NO_ERROR ||
- (status = (status_t)reply.readInt32()) != NO_ERROR) {
- return status;
- }
- *num_patches = (unsigned int)reply.readInt32();
- reply.read(patches, *num_patches * sizeof(struct audio_patch));
- return status;
- }
- virtual status_t setAudioPortConfig(const struct audio_port_config *config)
- {
- if (config == NULL) {
- return BAD_VALUE;
- }
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.write(config, sizeof(struct audio_port_config));
- status_t status = remote()->transact(SET_AUDIO_PORT_CONFIG, data, &reply);
- if (status != NO_ERROR) {
- status = (status_t)reply.readInt32();
- }
- return status;
- }
- virtual audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(sessionId);
- status_t status = remote()->transact(GET_AUDIO_HW_SYNC_FOR_SESSION, data, &reply);
- if (status != NO_ERROR) {
- return AUDIO_HW_SYNC_INVALID;
- }
- return (audio_hw_sync_t)reply.readInt32();
- }
- virtual status_t systemReady()
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- return remote()->transact(SYSTEM_READY, data, &reply, IBinder::FLAG_ONEWAY);
- }
- virtual size_t frameCountHAL(audio_io_handle_t ioHandle) const
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) ioHandle);
- status_t status = remote()->transact(FRAME_COUNT_HAL, data, &reply);
- if (status != NO_ERROR) {
- return 0;
- }
- return reply.readInt64();
- }
- virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- status_t status = remote()->transact(GET_MICROPHONES, data, &reply);
- if (status != NO_ERROR ||
- (status = (status_t)reply.readInt32()) != NO_ERROR) {
- return status;
- }
- status = reply.readParcelableVector(microphones);
- return status;
- }
- virtual status_t setAudioHalPids(const std::vector<pid_t>& pids)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(pids.size());
- for (auto pid : pids) {
- data.writeInt32(pid);
- }
- status_t status = remote()->transact(SET_AUDIO_HAL_PIDS, data, &reply);
- if (status != NO_ERROR) {
- return status;
- }
- return static_cast <status_t> (reply.readInt32());
- }
-};
-
-IMPLEMENT_META_INTERFACE(AudioFlinger, "android.media.IAudioFlinger");
-
-// ----------------------------------------------------------------------
-
-status_t BnAudioFlinger::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
- // make sure transactions reserved to AudioPolicyManager do not come from other processes
- switch (code) {
- case SET_STREAM_VOLUME:
- case SET_STREAM_MUTE:
- case OPEN_OUTPUT:
- case OPEN_DUPLICATE_OUTPUT:
- case CLOSE_OUTPUT:
- case SUSPEND_OUTPUT:
- case RESTORE_OUTPUT:
- case OPEN_INPUT:
- case CLOSE_INPUT:
- case INVALIDATE_STREAM:
- case SET_VOICE_VOLUME:
- case MOVE_EFFECTS:
- case SET_EFFECT_SUSPENDED:
- case LOAD_HW_MODULE:
- case LIST_AUDIO_PORTS:
- case GET_AUDIO_PORT:
- case CREATE_AUDIO_PATCH:
- case RELEASE_AUDIO_PATCH:
- case LIST_AUDIO_PATCHES:
- case SET_AUDIO_PORT_CONFIG:
- case SET_RECORD_SILENCED:
- ALOGW("%s: transaction %d received from PID %d",
- __func__, code, IPCThreadState::self()->getCallingPid());
- // return status only for non void methods
- switch (code) {
- case SET_RECORD_SILENCED:
- case SET_EFFECT_SUSPENDED:
- break;
- default:
- reply->writeInt32(static_cast<int32_t> (INVALID_OPERATION));
- break;
- }
- return OK;
- default:
- break;
- }
-
- // make sure the following transactions come from system components
- switch (code) {
- case SET_MASTER_VOLUME:
- case SET_MASTER_MUTE:
- case SET_MODE:
- case SET_MIC_MUTE:
- case SET_LOW_RAM_DEVICE:
- case SYSTEM_READY:
- case SET_AUDIO_HAL_PIDS: {
- if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
- ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
- __func__, code, IPCThreadState::self()->getCallingPid(),
- IPCThreadState::self()->getCallingUid());
- // return status only for non void methods
- switch (code) {
- case SYSTEM_READY:
- break;
- default:
- reply->writeInt32(static_cast<int32_t> (INVALID_OPERATION));
- break;
- }
- return OK;
- }
- } break;
- default:
- break;
- }
-
- // List of relevant events that trigger log merging.
- // Log merging should activate during audio activity of any kind. This are considered the
- // most relevant events.
- // TODO should select more wisely the items from the list
- switch (code) {
- case CREATE_TRACK:
- case CREATE_RECORD:
- case SET_MASTER_VOLUME:
- case SET_MASTER_MUTE:
- case SET_MIC_MUTE:
- case SET_PARAMETERS:
- case CREATE_EFFECT:
- case SYSTEM_READY: {
- requestLogMerge();
- break;
- }
- default:
- break;
- }
-
- std::string tag("IAudioFlinger command " + std::to_string(code));
- TimeCheck check(tag.c_str());
-
- // Make sure we connect to Audio Policy Service before calling into AudioFlinger:
- // - AudioFlinger can call into Audio Policy Service with its global mutex held
- // - If this is the first time Audio Policy Service is queried from inside audioserver process
- // this will trigger Audio Policy Manager initialization.
- // - Audio Policy Manager initialization calls into AudioFlinger which will try to lock
- // its global mutex and a deadlock will occur.
- if (IPCThreadState::self()->getCallingPid() != getpid()) {
- AudioSystem::get_audio_policy_service();
- }
-
- switch (code) {
- case CREATE_TRACK: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
-
- media::CreateTrackRequest input;
- if (data.readParcelable(&input) != NO_ERROR) {
- reply->writeInt32(DEAD_OBJECT);
- return NO_ERROR;
- }
-
- status_t status;
- media::CreateTrackResponse output;
-
- status = createTrack(input, output);
-
- LOG_ALWAYS_FATAL_IF((output.audioTrack != 0) != (status == NO_ERROR));
- reply->writeInt32(status);
- if (status != NO_ERROR) {
- return NO_ERROR;
- }
- output.writeToParcel(reply);
- return NO_ERROR;
- } break;
- case CREATE_RECORD: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
-
- media::CreateRecordRequest input;
- if (data.readParcelable(&input) != NO_ERROR) {
- reply->writeInt32(DEAD_OBJECT);
- return NO_ERROR;
- }
-
- status_t status;
- media::CreateRecordResponse output;
-
- status = createRecord(input, output);
-
- LOG_ALWAYS_FATAL_IF((output.audioRecord != 0) != (status == NO_ERROR));
- reply->writeInt32(status);
- if (status != NO_ERROR) {
- return NO_ERROR;
- }
- output.writeToParcel(reply);
- return NO_ERROR;
- } break;
- case SAMPLE_RATE: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32( sampleRate((audio_io_handle_t) data.readInt32()) );
- return NO_ERROR;
- } break;
-
- // RESERVED for channelCount()
-
- case FORMAT: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32( format((audio_io_handle_t) data.readInt32()) );
- return NO_ERROR;
- } break;
- case FRAME_COUNT: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt64( frameCount((audio_io_handle_t) data.readInt32()) );
- return NO_ERROR;
- } break;
- case LATENCY: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32( latency((audio_io_handle_t) data.readInt32()) );
- return NO_ERROR;
- } break;
- case SET_MASTER_VOLUME: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32( setMasterVolume(data.readFloat()) );
- return NO_ERROR;
- } break;
- case SET_MASTER_MUTE: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32( setMasterMute(data.readInt32()) );
- return NO_ERROR;
- } break;
- case MASTER_VOLUME: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeFloat( masterVolume() );
- return NO_ERROR;
- } break;
- case MASTER_MUTE: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32( masterMute() );
- return NO_ERROR;
- } break;
- case SET_MASTER_BALANCE: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32( setMasterBalance(data.readFloat()) );
- return NO_ERROR;
- } break;
- case GET_MASTER_BALANCE: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- float f;
- const status_t status = getMasterBalance(&f);
- reply->writeInt32((int32_t)status);
- if (status == NO_ERROR) {
- (void)reply->writeFloat(f);
- }
- return NO_ERROR;
- } break;
- case SET_STREAM_VOLUME: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- int stream = data.readInt32();
- float volume = data.readFloat();
- audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
- reply->writeInt32( setStreamVolume((audio_stream_type_t) stream, volume, output) );
- return NO_ERROR;
- } break;
- case SET_STREAM_MUTE: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- int stream = data.readInt32();
- reply->writeInt32( setStreamMute((audio_stream_type_t) stream, data.readInt32()) );
- return NO_ERROR;
- } break;
- case STREAM_VOLUME: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- int stream = data.readInt32();
- int output = data.readInt32();
- reply->writeFloat( streamVolume((audio_stream_type_t) stream, output) );
- return NO_ERROR;
- } break;
- case STREAM_MUTE: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- int stream = data.readInt32();
- reply->writeInt32( streamMute((audio_stream_type_t) stream) );
- return NO_ERROR;
- } break;
- case SET_MODE: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- audio_mode_t mode = (audio_mode_t) data.readInt32();
- reply->writeInt32( setMode(mode) );
- return NO_ERROR;
- } break;
- case SET_MIC_MUTE: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- int state = data.readInt32();
- reply->writeInt32( setMicMute(state) );
- return NO_ERROR;
- } break;
- case GET_MIC_MUTE: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32( getMicMute() );
- return NO_ERROR;
- } break;
- case SET_RECORD_SILENCED: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- audio_port_handle_t portId = data.readInt32();
- bool silenced = data.readInt32() == 1;
- setRecordSilenced(portId, silenced);
- return NO_ERROR;
- } break;
- case SET_PARAMETERS: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32();
- String8 keyValuePairs(data.readString8());
- reply->writeInt32(setParameters(ioHandle, keyValuePairs));
- return NO_ERROR;
- } break;
- case GET_PARAMETERS: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32();
- String8 keys(data.readString8());
- reply->writeString8(getParameters(ioHandle, keys));
- return NO_ERROR;
- } break;
-
- case REGISTER_CLIENT: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- sp<media::IAudioFlingerClient> client = interface_cast<media::IAudioFlingerClient>(
- data.readStrongBinder());
- registerClient(client);
- return NO_ERROR;
- } break;
- case GET_INPUTBUFFERSIZE: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- uint32_t sampleRate = data.readInt32();
- audio_format_t format = (audio_format_t) data.readInt32();
- audio_channel_mask_t channelMask = (audio_channel_mask_t) data.readInt32();
- reply->writeInt64( getInputBufferSize(sampleRate, format, channelMask) );
- return NO_ERROR;
- } break;
- case OPEN_OUTPUT: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- status_t status;
- media::OpenOutputRequest request;
- media::OpenOutputResponse response;
- return data.readParcelable(&request)
- ?: (status = openOutput(request, &response), OK)
- ?: reply->writeInt32(status)
- ?: reply->writeParcelable(response);
- } break;
- case OPEN_DUPLICATE_OUTPUT: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- audio_io_handle_t output1 = (audio_io_handle_t) data.readInt32();
- audio_io_handle_t output2 = (audio_io_handle_t) data.readInt32();
- reply->writeInt32((int32_t) openDuplicateOutput(output1, output2));
- return NO_ERROR;
- } break;
- case CLOSE_OUTPUT: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32(closeOutput((audio_io_handle_t) data.readInt32()));
- return NO_ERROR;
- } break;
- case SUSPEND_OUTPUT: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32(suspendOutput((audio_io_handle_t) data.readInt32()));
- return NO_ERROR;
- } break;
- case RESTORE_OUTPUT: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32(restoreOutput((audio_io_handle_t) data.readInt32()));
- return NO_ERROR;
- } break;
- case OPEN_INPUT: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- media::OpenInputRequest request;
- media::OpenInputResponse response;
- status_t status;
- return data.readParcelable(&request)
- ?: (status = openInput(request, &response), OK)
- ?: reply->writeInt32(status)
- ?: reply->writeParcelable(response);
- } break;
- case CLOSE_INPUT: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32(closeInput((audio_io_handle_t) data.readInt32()));
- return NO_ERROR;
- } break;
- case INVALIDATE_STREAM: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- audio_stream_type_t stream = (audio_stream_type_t) data.readInt32();
- reply->writeInt32(invalidateStream(stream));
- return NO_ERROR;
- } break;
- case SET_VOICE_VOLUME: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- float volume = data.readFloat();
- reply->writeInt32( setVoiceVolume(volume) );
- return NO_ERROR;
- } break;
- case GET_RENDER_POSITION: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
- uint32_t halFrames = 0;
- uint32_t dspFrames = 0;
- status_t status = getRenderPosition(&halFrames, &dspFrames, output);
- reply->writeInt32(status);
- if (status == NO_ERROR) {
- reply->writeInt32(halFrames);
- reply->writeInt32(dspFrames);
- }
- return NO_ERROR;
- }
- case GET_INPUT_FRAMES_LOST: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32();
- reply->writeInt32((int32_t) getInputFramesLost(ioHandle));
- return NO_ERROR;
- } break;
- case NEW_AUDIO_UNIQUE_ID: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32(newAudioUniqueId((audio_unique_id_use_t) data.readInt32()));
- return NO_ERROR;
- } break;
- case ACQUIRE_AUDIO_SESSION_ID: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- audio_session_t audioSession = (audio_session_t) data.readInt32();
- const pid_t pid = (pid_t)data.readInt32();
- const uid_t uid = (uid_t)data.readInt32();
- acquireAudioSessionId(audioSession, pid, uid);
- return NO_ERROR;
- } break;
- case RELEASE_AUDIO_SESSION_ID: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- audio_session_t audioSession = (audio_session_t) data.readInt32();
- int pid = data.readInt32();
- releaseAudioSessionId(audioSession, pid);
- return NO_ERROR;
- } break;
- case QUERY_NUM_EFFECTS: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- uint32_t numEffects = 0;
- status_t status = queryNumberEffects(&numEffects);
- reply->writeInt32(status);
- if (status == NO_ERROR) {
- reply->writeInt32((int32_t)numEffects);
- }
- return NO_ERROR;
- }
- case QUERY_EFFECT: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- effect_descriptor_t desc = {};
- status_t status = queryEffect(data.readInt32(), &desc);
- reply->writeInt32(status);
- if (status == NO_ERROR) {
- reply->write(&desc, sizeof(effect_descriptor_t));
- }
- return NO_ERROR;
- }
- case GET_EFFECT_DESCRIPTOR: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- effect_uuid_t uuid = {};
- if (data.read(&uuid, sizeof(effect_uuid_t)) != NO_ERROR) {
- android_errorWriteLog(0x534e4554, "139417189");
- }
- effect_uuid_t type = {};
- if (data.read(&type, sizeof(effect_uuid_t)) != NO_ERROR) {
- android_errorWriteLog(0x534e4554, "139417189");
- }
- uint32_t preferredTypeFlag = data.readUint32();
- effect_descriptor_t desc = {};
- status_t status = getEffectDescriptor(&uuid, &type, preferredTypeFlag, &desc);
- reply->writeInt32(status);
- if (status == NO_ERROR) {
- reply->write(&desc, sizeof(effect_descriptor_t));
- }
- return NO_ERROR;
- }
- case CREATE_EFFECT: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
-
- media::CreateEffectRequest request;
- media::CreateEffectResponse response;
-
- return data.readParcelable(&request)
- ?: reply->writeInt32(createEffect(request, &response))
- ?: reply->writeParcelable(response);
- } break;
- case MOVE_EFFECTS: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- audio_session_t session = (audio_session_t) data.readInt32();
- audio_io_handle_t srcOutput = (audio_io_handle_t) data.readInt32();
- audio_io_handle_t dstOutput = (audio_io_handle_t) data.readInt32();
- reply->writeInt32(moveEffects(session, srcOutput, dstOutput));
- return NO_ERROR;
- } break;
- case SET_EFFECT_SUSPENDED: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- int effectId = data.readInt32();
- audio_session_t sessionId = (audio_session_t) data.readInt32();
- bool suspended = data.readInt32() == 1;
- setEffectSuspended(effectId, sessionId, suspended);
- return NO_ERROR;
- } break;
- case LOAD_HW_MODULE: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32(loadHwModule(data.readCString()));
- return NO_ERROR;
- } break;
- case GET_PRIMARY_OUTPUT_SAMPLING_RATE: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32(getPrimaryOutputSamplingRate());
- return NO_ERROR;
- } break;
- case GET_PRIMARY_OUTPUT_FRAME_COUNT: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt64(getPrimaryOutputFrameCount());
- return NO_ERROR;
- } break;
- case SET_LOW_RAM_DEVICE: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- int32_t isLowRamDevice;
- int64_t totalMemory;
- const status_t status =
- data.readInt32(&isLowRamDevice) ?:
- data.readInt64(&totalMemory) ?:
- setLowRamDevice(isLowRamDevice != 0, totalMemory);
- (void)reply->writeInt32(status);
- return NO_ERROR;
- } break;
- case LIST_AUDIO_PORTS: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- unsigned int numPortsReq = data.readInt32();
- if (numPortsReq > MAX_ITEMS_PER_LIST) {
- numPortsReq = MAX_ITEMS_PER_LIST;
- }
- unsigned int numPorts = numPortsReq;
- struct audio_port *ports =
- (struct audio_port *)calloc(numPortsReq,
- sizeof(struct audio_port));
- if (ports == NULL) {
- reply->writeInt32(NO_MEMORY);
- reply->writeInt32(0);
- return NO_ERROR;
- }
- status_t status = listAudioPorts(&numPorts, ports);
- reply->writeInt32(status);
- reply->writeInt32(numPorts);
- if (status == NO_ERROR) {
- if (numPortsReq > numPorts) {
- numPortsReq = numPorts;
- }
- reply->write(ports, numPortsReq * sizeof(struct audio_port));
- }
- free(ports);
- return NO_ERROR;
- } break;
- case GET_AUDIO_PORT: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- struct audio_port_v7 port = {};
- status_t status = data.read(&port, sizeof(struct audio_port));
- if (status != NO_ERROR) {
- ALOGE("b/23905951");
- return status;
- }
- status = AudioValidator::validateAudioPort(port);
- if (status == NO_ERROR) {
- status = getAudioPort(&port);
- }
- reply->writeInt32(status);
- if (status == NO_ERROR) {
- reply->write(&port, sizeof(struct audio_port_v7));
- }
- return NO_ERROR;
- } break;
- case CREATE_AUDIO_PATCH: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- struct audio_patch patch;
- status_t status = data.read(&patch, sizeof(struct audio_patch));
- if (status != NO_ERROR) {
- return status;
- }
- audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
- status = data.read(&handle, sizeof(audio_patch_handle_t));
- if (status != NO_ERROR) {
- ALOGE("b/23905951");
- return status;
- }
- status = AudioValidator::validateAudioPatch(patch);
- if (status == NO_ERROR) {
- status = createAudioPatch(&patch, &handle);
- }
- reply->writeInt32(status);
- if (status == NO_ERROR) {
- reply->write(&handle, sizeof(audio_patch_handle_t));
- }
- return NO_ERROR;
- } break;
- case RELEASE_AUDIO_PATCH: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- audio_patch_handle_t handle;
- data.read(&handle, sizeof(audio_patch_handle_t));
- status_t status = releaseAudioPatch(handle);
- reply->writeInt32(status);
- return NO_ERROR;
- } break;
- case LIST_AUDIO_PATCHES: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- unsigned int numPatchesReq = data.readInt32();
- if (numPatchesReq > MAX_ITEMS_PER_LIST) {
- numPatchesReq = MAX_ITEMS_PER_LIST;
- }
- unsigned int numPatches = numPatchesReq;
- struct audio_patch *patches =
- (struct audio_patch *)calloc(numPatchesReq,
- sizeof(struct audio_patch));
- if (patches == NULL) {
- reply->writeInt32(NO_MEMORY);
- reply->writeInt32(0);
- return NO_ERROR;
- }
- status_t status = listAudioPatches(&numPatches, patches);
- reply->writeInt32(status);
- reply->writeInt32(numPatches);
- if (status == NO_ERROR) {
- if (numPatchesReq > numPatches) {
- numPatchesReq = numPatches;
- }
- reply->write(patches, numPatchesReq * sizeof(struct audio_patch));
- }
- free(patches);
- return NO_ERROR;
- } break;
- case SET_AUDIO_PORT_CONFIG: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- struct audio_port_config config;
- status_t status = data.read(&config, sizeof(struct audio_port_config));
- if (status != NO_ERROR) {
- return status;
- }
- status = AudioValidator::validateAudioPortConfig(config);
- if (status == NO_ERROR) {
- status = setAudioPortConfig(&config);
- }
- reply->writeInt32(status);
- return NO_ERROR;
- } break;
- case GET_AUDIO_HW_SYNC_FOR_SESSION: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32(getAudioHwSyncForSession((audio_session_t) data.readInt32()));
- return NO_ERROR;
- } break;
- case SYSTEM_READY: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- systemReady();
- return NO_ERROR;
- } break;
- case FRAME_COUNT_HAL: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt64( frameCountHAL((audio_io_handle_t) data.readInt32()) );
- return NO_ERROR;
- } break;
- case GET_MICROPHONES: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- std::vector<media::MicrophoneInfo> microphones;
-            status_t status = getMicrophones(&microphones);
- reply->writeInt32(status);
- if (status == NO_ERROR) {
- reply->writeParcelableVector(microphones);
- }
- return NO_ERROR;
- }
- case SET_AUDIO_HAL_PIDS: {
- CHECK_INTERFACE(IAudioFlinger, data, reply);
- std::vector<pid_t> pids;
- int32_t size;
- status_t status = data.readInt32(&size);
- if (status != NO_ERROR) {
- return status;
- }
- if (size < 0) {
- return BAD_VALUE;
- }
- if (size > MAX_ITEMS_PER_LIST) {
- size = MAX_ITEMS_PER_LIST;
- }
- for (int32_t i = 0; i < size; i++) {
- int32_t pid;
- status = data.readInt32(&pid);
- if (status != NO_ERROR) {
- return status;
- }
- pids.push_back(pid);
- }
- reply->writeInt32(setAudioHalPids(pids));
- return NO_ERROR;
- }
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
+status_t AudioFlingerClientAdapter::createTrack(const media::CreateTrackRequest& input,
+ media::CreateTrackResponse& output) {
+ return mDelegate->createTrack(input, &output).transactionError();
}
-// ----------------------------------------------------------------------------
+status_t AudioFlingerClientAdapter::createRecord(const media::CreateRecordRequest& input,
+ media::CreateRecordResponse& output) {
+ return mDelegate->createRecord(input, &output).transactionError();
+}
+
+uint32_t AudioFlingerClientAdapter::sampleRate(audio_io_handle_t ioHandle) const {
+ auto result = [&]() -> ConversionResult<uint32_t> {
+ int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
+ int32_t aidlRet;
+ RETURN_IF_ERROR(mDelegate->sampleRate(ioHandleAidl, &aidlRet).transactionError());
+ return convertIntegral<uint32_t>(aidlRet);
+ }();
+ // Failure is ignored.
+ return result.value_or(0);
+}
+
+audio_format_t AudioFlingerClientAdapter::format(audio_io_handle_t output) const {
+ auto result = [&]() -> ConversionResult<audio_format_t> {
+ int32_t outputAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output));
+ media::audio::common::AudioFormat aidlRet;
+ RETURN_IF_ERROR(mDelegate->format(outputAidl, &aidlRet).transactionError());
+ return aidl2legacy_AudioFormat_audio_format_t(aidlRet);
+ }();
+ return result.value_or(AUDIO_FORMAT_INVALID);
+}
+
+size_t AudioFlingerClientAdapter::frameCount(audio_io_handle_t ioHandle) const {
+ auto result = [&]() -> ConversionResult<size_t> {
+ int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
+ int64_t aidlRet;
+ RETURN_IF_ERROR(mDelegate->frameCount(ioHandleAidl, &aidlRet).transactionError());
+ return convertIntegral<size_t>(aidlRet);
+ }();
+ // Failure is ignored.
+ return result.value_or(0);
+}
+
+uint32_t AudioFlingerClientAdapter::latency(audio_io_handle_t output) const {
+ auto result = [&]() -> ConversionResult<uint32_t> {
+ int32_t outputAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output));
+ int32_t aidlRet;
+ RETURN_IF_ERROR(mDelegate->latency(outputAidl, &aidlRet).transactionError());
+ return convertIntegral<uint32_t>(aidlRet);
+ }();
+ // Failure is ignored.
+ return result.value_or(0);
+}
+
+status_t AudioFlingerClientAdapter::setMasterVolume(float value) {
+ return mDelegate->setMasterVolume(value).transactionError();
+}
+
+status_t AudioFlingerClientAdapter::setMasterMute(bool muted) {
+ return mDelegate->setMasterMute(muted).transactionError();
+}
+
+float AudioFlingerClientAdapter::masterVolume() const {
+ auto result = [&]() -> ConversionResult<float> {
+ float aidlRet;
+ RETURN_IF_ERROR(mDelegate->masterVolume(&aidlRet).transactionError());
+ return aidlRet;
+ }();
+ // Failure is ignored.
+ return result.value_or(0.f);
+}
+
+bool AudioFlingerClientAdapter::masterMute() const {
+ auto result = [&]() -> ConversionResult<bool> {
+ bool aidlRet;
+ RETURN_IF_ERROR(mDelegate->masterMute(&aidlRet).transactionError());
+ return aidlRet;
+ }();
+ // Failure is ignored.
+ return result.value_or(false);
+}
+
+status_t AudioFlingerClientAdapter::setMasterBalance(float balance) {
+ return mDelegate->setMasterBalance(balance).transactionError();
+}
+
+status_t AudioFlingerClientAdapter::getMasterBalance(float* balance) const {
+ return mDelegate->getMasterBalance(balance).transactionError();
+}
+
+status_t AudioFlingerClientAdapter::setStreamVolume(audio_stream_type_t stream, float value,
+ audio_io_handle_t output) {
+ media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
+ int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
+ return mDelegate->setStreamVolume(streamAidl, value, outputAidl).transactionError();
+}
+
+status_t AudioFlingerClientAdapter::setStreamMute(audio_stream_type_t stream, bool muted) {
+ media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
+ return mDelegate->setStreamMute(streamAidl, muted).transactionError();
+}
+
+float AudioFlingerClientAdapter::streamVolume(audio_stream_type_t stream,
+ audio_io_handle_t output) const {
+ auto result = [&]() -> ConversionResult<float> {
+ media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
+ int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
+ float aidlRet;
+ RETURN_IF_ERROR(
+ mDelegate->streamVolume(streamAidl, outputAidl, &aidlRet).transactionError());
+ return aidlRet;
+ }();
+ // Failure is ignored.
+ return result.value_or(0.f);
+}
+
+bool AudioFlingerClientAdapter::streamMute(audio_stream_type_t stream) const {
+ auto result = [&]() -> ConversionResult<bool> {
+ media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
+ bool aidlRet;
+ RETURN_IF_ERROR(
+ mDelegate->streamMute(streamAidl, &aidlRet).transactionError());
+ return aidlRet;
+ }();
+ // Failure is ignored.
+ return result.value_or(false);
+}
+
+status_t AudioFlingerClientAdapter::setMode(audio_mode_t mode) {
+ media::AudioMode modeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_mode_t_AudioMode(mode));
+ return mDelegate->setMode(modeAidl).transactionError();
+}
+
+status_t AudioFlingerClientAdapter::setMicMute(bool state) {
+ return mDelegate->setMicMute(state).transactionError();
+}
+
+bool AudioFlingerClientAdapter::getMicMute() const {
+ auto result = [&]() -> ConversionResult<bool> {
+ bool aidlRet;
+ RETURN_IF_ERROR(
+ mDelegate->getMicMute(&aidlRet).transactionError());
+ return aidlRet;
+ }();
+ // Failure is ignored.
+ return result.value_or(false);
+}
+
+void AudioFlingerClientAdapter::setRecordSilenced(audio_port_handle_t portId, bool silenced) {
+ auto result = [&]() -> status_t {
+ int32_t portIdAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_port_handle_t_int32_t(portId));
+ return mDelegate->setRecordSilenced(portIdAidl, silenced).transactionError();
+ }();
+ // Failure is ignored.
+ (void) result;
+}
+
+status_t AudioFlingerClientAdapter::setParameters(audio_io_handle_t ioHandle,
+ const String8& keyValuePairs) {
+ int32_t ioHandleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
+ std::string keyValuePairsAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_String8_string(keyValuePairs));
+ return mDelegate->setParameters(ioHandleAidl, keyValuePairsAidl).transactionError();
+}
+
+String8 AudioFlingerClientAdapter::getParameters(audio_io_handle_t ioHandle, const String8& keys)
+const {
+ auto result = [&]() -> ConversionResult<String8> {
+ int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
+ std::string keysAidl = VALUE_OR_RETURN(legacy2aidl_String8_string(keys));
+ std::string aidlRet;
+ RETURN_IF_ERROR(
+ mDelegate->getParameters(ioHandleAidl, keysAidl, &aidlRet).transactionError());
+ return aidl2legacy_string_view_String8(aidlRet);
+ }();
+ // Failure is ignored.
+ return result.value_or(String8());
+}
+
+void AudioFlingerClientAdapter::registerClient(const sp<media::IAudioFlingerClient>& client) {
+ mDelegate->registerClient(client);
+ // Failure is ignored.
+}
+
+size_t AudioFlingerClientAdapter::getInputBufferSize(uint32_t sampleRate, audio_format_t format,
+ audio_channel_mask_t channelMask) const {
+ auto result = [&]() -> ConversionResult<size_t> {
+ int32_t sampleRateAidl = VALUE_OR_RETURN(convertIntegral<int32_t>(sampleRate));
+ media::audio::common::AudioFormat formatAidl = VALUE_OR_RETURN(
+ legacy2aidl_audio_format_t_AudioFormat(format));
+ int32_t channelMaskAidl = VALUE_OR_RETURN(
+ legacy2aidl_audio_channel_mask_t_int32_t(channelMask));
+ int64_t aidlRet;
+ RETURN_IF_ERROR(
+ mDelegate->getInputBufferSize(sampleRateAidl, formatAidl, channelMaskAidl,
+ &aidlRet).transactionError());
+ return convertIntegral<size_t>(aidlRet);
+ }();
+ // Failure is ignored.
+ return result.value_or(0);
+}
+
+status_t AudioFlingerClientAdapter::openOutput(const media::OpenOutputRequest& request,
+ media::OpenOutputResponse* response) {
+ return mDelegate->openOutput(request, response).transactionError();
+}
+
+audio_io_handle_t AudioFlingerClientAdapter::openDuplicateOutput(audio_io_handle_t output1,
+ audio_io_handle_t output2) {
+ auto result = [&]() -> ConversionResult<audio_io_handle_t> {
+ int32_t output1Aidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output1));
+ int32_t output2Aidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output2));
+ int32_t aidlRet;
+ RETURN_IF_ERROR(mDelegate->openDuplicateOutput(output1Aidl, output2Aidl,
+ &aidlRet).transactionError());
+ return aidl2legacy_int32_t_audio_io_handle_t(aidlRet);
+ }();
+ // Failure is ignored.
+ return result.value_or(0);
+}
+
+status_t AudioFlingerClientAdapter::closeOutput(audio_io_handle_t output) {
+ int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
+ return mDelegate->closeOutput(outputAidl).transactionError();
+}
+
+status_t AudioFlingerClientAdapter::suspendOutput(audio_io_handle_t output) {
+ int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
+ return mDelegate->suspendOutput(outputAidl).transactionError();
+}
+
+status_t AudioFlingerClientAdapter::restoreOutput(audio_io_handle_t output) {
+ int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
+ return mDelegate->restoreOutput(outputAidl).transactionError();
+}
+
+status_t AudioFlingerClientAdapter::openInput(const media::OpenInputRequest& request,
+ media::OpenInputResponse* response) {
+ return mDelegate->openInput(request, response).transactionError();
+}
+
+status_t AudioFlingerClientAdapter::closeInput(audio_io_handle_t input) {
+ int32_t inputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(input));
+ return mDelegate->closeInput(inputAidl).transactionError();
+}
+
+status_t AudioFlingerClientAdapter::invalidateStream(audio_stream_type_t stream) {
+ media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
+ return mDelegate->invalidateStream(streamAidl).transactionError();
+}
+
+status_t AudioFlingerClientAdapter::setVoiceVolume(float volume) {
+ return mDelegate->setVoiceVolume(volume).transactionError();
+}
+
+status_t AudioFlingerClientAdapter::getRenderPosition(uint32_t* halFrames, uint32_t* dspFrames,
+ audio_io_handle_t output) const {
+ int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
+ media::RenderPosition aidlRet;
+ RETURN_STATUS_IF_ERROR(mDelegate->getRenderPosition(outputAidl, &aidlRet).transactionError());
+ if (halFrames != nullptr) {
+ *halFrames = VALUE_OR_RETURN_STATUS(convertIntegral<uint32_t>(aidlRet.halFrames));
+ }
+ if (dspFrames != nullptr) {
+ *dspFrames = VALUE_OR_RETURN_STATUS(convertIntegral<uint32_t>(aidlRet.dspFrames));
+ }
+ return OK;
+}
+
+uint32_t AudioFlingerClientAdapter::getInputFramesLost(audio_io_handle_t ioHandle) const {
+ auto result = [&]() -> ConversionResult<uint32_t> {
+ int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
+ int32_t aidlRet;
+ RETURN_IF_ERROR(mDelegate->getInputFramesLost(ioHandleAidl, &aidlRet).transactionError());
+ return convertIntegral<uint32_t>(aidlRet);
+ }();
+ // Failure is ignored.
+ return result.value_or(0);
+}
+
+audio_unique_id_t AudioFlingerClientAdapter::newAudioUniqueId(audio_unique_id_use_t use) {
+ auto result = [&]() -> ConversionResult<audio_unique_id_t> {
+ media::AudioUniqueIdUse useAidl = VALUE_OR_RETURN(
+ legacy2aidl_audio_unique_id_use_t_AudioUniqueIdUse(use));
+ int32_t aidlRet;
+ RETURN_IF_ERROR(mDelegate->newAudioUniqueId(useAidl, &aidlRet).transactionError());
+ return aidl2legacy_int32_t_audio_unique_id_t(aidlRet);
+ }();
+ return result.value_or(AUDIO_UNIQUE_ID_ALLOCATE);
+}
+
+void AudioFlingerClientAdapter::acquireAudioSessionId(audio_session_t audioSession, pid_t pid,
+ uid_t uid) {
+ [&]() -> status_t {
+ int32_t audioSessionAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_session_t_int32_t(audioSession));
+ int32_t pidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(pid));
+ int32_t uidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(uid));
+ return mDelegate->acquireAudioSessionId(audioSessionAidl, pidAidl,
+ uidAidl).transactionError();
+ }();
+ // Failure is ignored.
+}
+
+void AudioFlingerClientAdapter::releaseAudioSessionId(audio_session_t audioSession, pid_t pid) {
+ [&]() -> status_t {
+ int32_t audioSessionAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_session_t_int32_t(audioSession));
+ int32_t pidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(pid));
+ return mDelegate->releaseAudioSessionId(audioSessionAidl, pidAidl).transactionError();
+ }();
+ // Failure is ignored.
+}
+
+status_t AudioFlingerClientAdapter::queryNumberEffects(uint32_t* numEffects) const {
+ int32_t aidlRet;
+ RETURN_STATUS_IF_ERROR(mDelegate->queryNumberEffects(&aidlRet).transactionError());
+ if (numEffects != nullptr) {
+ *numEffects = VALUE_OR_RETURN_STATUS(convertIntegral<uint32_t>(aidlRet));
+ }
+ return OK;
+}
+
+status_t
+AudioFlingerClientAdapter::queryEffect(uint32_t index, effect_descriptor_t* pDescriptor) const {
+ int32_t indexAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(index));
+ media::EffectDescriptor aidlRet;
+ RETURN_STATUS_IF_ERROR(mDelegate->queryEffect(indexAidl, &aidlRet).transactionError());
+ if (pDescriptor != nullptr) {
+ *pDescriptor = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_EffectDescriptor_effect_descriptor_t(aidlRet));
+ }
+ return OK;
+}
+
+status_t AudioFlingerClientAdapter::getEffectDescriptor(const effect_uuid_t* pEffectUUID,
+ const effect_uuid_t* pTypeUUID,
+ uint32_t preferredTypeFlag,
+ effect_descriptor_t* pDescriptor) const {
+ media::AudioUuid effectUuidAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_uuid_t_AudioUuid(*pEffectUUID));
+ media::AudioUuid typeUuidAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_uuid_t_AudioUuid(*pTypeUUID));
+ int32_t preferredTypeFlagAidl = VALUE_OR_RETURN_STATUS(
+ convertReinterpret<int32_t>(preferredTypeFlag));
+ media::EffectDescriptor aidlRet;
+ RETURN_STATUS_IF_ERROR(
+ mDelegate->getEffectDescriptor(effectUuidAidl, typeUuidAidl, preferredTypeFlagAidl,
+ &aidlRet).transactionError());
+ if (pDescriptor != nullptr) {
+ *pDescriptor = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_EffectDescriptor_effect_descriptor_t(aidlRet));
+ }
+ return OK;
+}
+
+status_t AudioFlingerClientAdapter::createEffect(const media::CreateEffectRequest& request,
+ media::CreateEffectResponse* response) {
+ return mDelegate->createEffect(request, response).transactionError();
+}
+
+status_t
+AudioFlingerClientAdapter::moveEffects(audio_session_t session, audio_io_handle_t srcOutput,
+ audio_io_handle_t dstOutput) {
+ int32_t sessionAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_session_t_int32_t(session));
+ int32_t srcOutputAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_io_handle_t_int32_t(srcOutput));
+ int32_t dstOutputAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_io_handle_t_int32_t(dstOutput));
+ return mDelegate->moveEffects(sessionAidl, srcOutputAidl, dstOutputAidl).transactionError();
+}
+
+void AudioFlingerClientAdapter::setEffectSuspended(int effectId,
+ audio_session_t sessionId,
+ bool suspended) {
+ [&]() -> status_t {
+ int32_t effectIdAidl = VALUE_OR_RETURN_STATUS(convertReinterpret<int32_t>(effectId));
+ int32_t sessionIdAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_session_t_int32_t(sessionId));
+ return mDelegate->setEffectSuspended(effectIdAidl, sessionIdAidl,
+ suspended).transactionError();
+ }();
+ // Failure is ignored.
+}
+
+audio_module_handle_t AudioFlingerClientAdapter::loadHwModule(const char* name) {
+ auto result = [&]() -> ConversionResult<audio_module_handle_t> {
+ std::string nameAidl(name);
+ int32_t aidlRet;
+ RETURN_IF_ERROR(mDelegate->loadHwModule(nameAidl, &aidlRet).transactionError());
+ return aidl2legacy_int32_t_audio_module_handle_t(aidlRet);
+ }();
+ // Failure is ignored.
+ return result.value_or(0);
+}
+
+uint32_t AudioFlingerClientAdapter::getPrimaryOutputSamplingRate() {
+ auto result = [&]() -> ConversionResult<uint32_t> {
+ int32_t aidlRet;
+ RETURN_IF_ERROR(mDelegate->getPrimaryOutputSamplingRate(&aidlRet).transactionError());
+ return convertIntegral<uint32_t>(aidlRet);
+ }();
+ // Failure is ignored.
+ return result.value_or(0);
+}
+
+size_t AudioFlingerClientAdapter::getPrimaryOutputFrameCount() {
+ auto result = [&]() -> ConversionResult<size_t> {
+ int64_t aidlRet;
+ RETURN_IF_ERROR(mDelegate->getPrimaryOutputFrameCount(&aidlRet).transactionError());
+ return convertIntegral<size_t>(aidlRet);
+ }();
+ // Failure is ignored.
+ return result.value_or(0);
+}
+
+status_t AudioFlingerClientAdapter::setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) {
+ return mDelegate->setLowRamDevice(isLowRamDevice, totalMemory).transactionError();
+}
+
+status_t AudioFlingerClientAdapter::getAudioPort(struct audio_port_v7* port) {
+ media::AudioPort portAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_v7_AudioPort(*port));
+ media::AudioPort aidlRet;
+ RETURN_STATUS_IF_ERROR(mDelegate->getAudioPort(portAidl, &aidlRet).transactionError());
+ *port = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPort_audio_port_v7(aidlRet));
+ return OK;
+}
+
+status_t AudioFlingerClientAdapter::createAudioPatch(const struct audio_patch* patch,
+ audio_patch_handle_t* handle) {
+ media::AudioPatch patchAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_patch_AudioPatch(*patch));
+ int32_t aidlRet;
+ RETURN_STATUS_IF_ERROR(mDelegate->createAudioPatch(patchAidl, &aidlRet).transactionError());
+ if (handle != nullptr) {
+ *handle = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_patch_handle_t(aidlRet));
+ }
+ return OK;
+}
+
+status_t AudioFlingerClientAdapter::releaseAudioPatch(audio_patch_handle_t handle) {
+ int32_t handleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_patch_handle_t_int32_t(handle));
+ return mDelegate->releaseAudioPatch(handleAidl).transactionError();
+}
+
+status_t AudioFlingerClientAdapter::listAudioPatches(unsigned int* num_patches,
+ struct audio_patch* patches) {
+ std::vector<media::AudioPatch> aidlRet;
+ int32_t maxPatches = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*num_patches));
+ RETURN_STATUS_IF_ERROR(mDelegate->listAudioPatches(maxPatches, &aidlRet).transactionError());
+ *num_patches = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(aidlRet.size()));
+ return convertRange(aidlRet.begin(), aidlRet.end(), patches,
+ aidl2legacy_AudioPatch_audio_patch);
+}
+
+status_t AudioFlingerClientAdapter::setAudioPortConfig(const struct audio_port_config* config) {
+ media::AudioPortConfig configAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_port_config_AudioPortConfig(*config));
+ return mDelegate->setAudioPortConfig(configAidl).transactionError();
+}
+
+audio_hw_sync_t AudioFlingerClientAdapter::getAudioHwSyncForSession(audio_session_t sessionId) {
+ auto result = [&]() -> ConversionResult<audio_hw_sync_t> {
+ int32_t sessionIdAidl = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(sessionId));
+ int32_t aidlRet;
+ RETURN_IF_ERROR(
+ mDelegate->getAudioHwSyncForSession(sessionIdAidl, &aidlRet).transactionError());
+ return aidl2legacy_int32_t_audio_hw_sync_t(aidlRet);
+ }();
+ return result.value_or(AUDIO_HW_SYNC_INVALID);
+}
+
+status_t AudioFlingerClientAdapter::systemReady() {
+ return mDelegate->systemReady().transactionError();
+}
+
+size_t AudioFlingerClientAdapter::frameCountHAL(audio_io_handle_t ioHandle) const {
+ auto result = [&]() -> ConversionResult<size_t> {
+ int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
+ int64_t aidlRet;
+ RETURN_IF_ERROR(mDelegate->frameCountHAL(ioHandleAidl, &aidlRet).transactionError());
+ return convertIntegral<size_t>(aidlRet);
+ }();
+ // Failure is ignored.
+ return result.value_or(0);
+}
+
+status_t
+AudioFlingerClientAdapter::getMicrophones(std::vector<media::MicrophoneInfo>* microphones) {
+ std::vector<media::MicrophoneInfoData> aidlRet;
+ RETURN_STATUS_IF_ERROR(mDelegate->getMicrophones(&aidlRet).transactionError());
+ if (microphones != nullptr) {
+ *microphones = VALUE_OR_RETURN_STATUS(
+ convertContainer<std::vector<media::MicrophoneInfo>>(aidlRet,
+ media::aidl2legacy_MicrophoneInfo));
+ }
+ return OK;
+}
+
+status_t AudioFlingerClientAdapter::setAudioHalPids(const std::vector<pid_t>& pids) {
+ std::vector<int32_t> pidsAidl = VALUE_OR_RETURN_STATUS(
+ convertContainer<std::vector<int32_t>>(pids, legacy2aidl_pid_t_int32_t));
+ return mDelegate->setAudioHalPids(pidsAidl).transactionError();
+}
+
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// AudioFlingerServerAdapter
+AudioFlingerServerAdapter::AudioFlingerServerAdapter(
+ const sp<AudioFlingerServerAdapter::Delegate>& delegate) : mDelegate(delegate) {}
+
+status_t AudioFlingerServerAdapter::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+ uint32_t flags) {
+ return mDelegate->onPreTransact(static_cast<Delegate::TransactionCode>(code), data, flags)
+ ?: BnAudioFlingerService::onTransact(code, data, reply, flags);
+}
+
+status_t AudioFlingerServerAdapter::dump(int fd, const Vector<String16>& args) {
+ return mDelegate->dump(fd, args);
+}
+
+Status AudioFlingerServerAdapter::createTrack(const media::CreateTrackRequest& request,
+ media::CreateTrackResponse* _aidl_return) {
+ return Status::fromStatusT(mDelegate->createTrack(request, *_aidl_return));
+}
+
+Status AudioFlingerServerAdapter::createRecord(const media::CreateRecordRequest& request,
+ media::CreateRecordResponse* _aidl_return) {
+ return Status::fromStatusT(mDelegate->createRecord(request, *_aidl_return));
+}
+
+Status AudioFlingerServerAdapter::sampleRate(int32_t ioHandle, int32_t* _aidl_return) {
+ audio_io_handle_t ioHandleLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(ioHandle));
+ *_aidl_return = VALUE_OR_RETURN_BINDER(
+ convertIntegral<int32_t>(mDelegate->sampleRate(ioHandleLegacy)));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::format(int32_t output,
+ media::audio::common::AudioFormat* _aidl_return) {
+ audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(output));
+ *_aidl_return = VALUE_OR_RETURN_BINDER(
+ legacy2aidl_audio_format_t_AudioFormat(mDelegate->format(outputLegacy)));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::frameCount(int32_t ioHandle, int64_t* _aidl_return) {
+ audio_io_handle_t ioHandleLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(ioHandle));
+ *_aidl_return = VALUE_OR_RETURN_BINDER(
+ convertIntegral<int64_t>(mDelegate->frameCount(ioHandleLegacy)));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::latency(int32_t output, int32_t* _aidl_return) {
+ audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(output));
+ *_aidl_return = VALUE_OR_RETURN_BINDER(
+ convertIntegral<int32_t>(mDelegate->latency(outputLegacy)));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::setMasterVolume(float value) {
+ return Status::fromStatusT(mDelegate->setMasterVolume(value));
+}
+
+Status AudioFlingerServerAdapter::setMasterMute(bool muted) {
+ return Status::fromStatusT(mDelegate->setMasterMute(muted));
+}
+
+Status AudioFlingerServerAdapter::masterVolume(float* _aidl_return) {
+ *_aidl_return = mDelegate->masterVolume();
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::masterMute(bool* _aidl_return) {
+ *_aidl_return = mDelegate->masterMute();
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::setMasterBalance(float balance) {
+ return Status::fromStatusT(mDelegate->setMasterBalance(balance));
+}
+
+Status AudioFlingerServerAdapter::getMasterBalance(float* _aidl_return) {
+ return Status::fromStatusT(mDelegate->getMasterBalance(_aidl_return));
+}
+
+Status AudioFlingerServerAdapter::setStreamVolume(media::AudioStreamType stream, float value,
+ int32_t output) {
+ audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
+ audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(output));
+ return Status::fromStatusT(mDelegate->setStreamVolume(streamLegacy, value, outputLegacy));
+}
+
+Status AudioFlingerServerAdapter::setStreamMute(media::AudioStreamType stream, bool muted) {
+ audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
+ return Status::fromStatusT(mDelegate->setStreamMute(streamLegacy, muted));
+}
+
+Status AudioFlingerServerAdapter::streamVolume(media::AudioStreamType stream, int32_t output,
+ float* _aidl_return) {
+ audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
+ audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(output));
+ *_aidl_return = mDelegate->streamVolume(streamLegacy, outputLegacy);
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::streamMute(media::AudioStreamType stream, bool* _aidl_return) {
+ audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
+ *_aidl_return = mDelegate->streamMute(streamLegacy);
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::setMode(media::AudioMode mode) {
+ audio_mode_t modeLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioMode_audio_mode_t(mode));
+ return Status::fromStatusT(mDelegate->setMode(modeLegacy));
+}
+
+Status AudioFlingerServerAdapter::setMicMute(bool state) {
+ return Status::fromStatusT(mDelegate->setMicMute(state));
+}
+
+Status AudioFlingerServerAdapter::getMicMute(bool* _aidl_return) {
+ *_aidl_return = mDelegate->getMicMute();
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::setRecordSilenced(int32_t portId, bool silenced) {
+ audio_port_handle_t portIdLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_port_handle_t(portId));
+ mDelegate->setRecordSilenced(portIdLegacy, silenced);
+ return Status::ok();
+}
+
+Status
+AudioFlingerServerAdapter::setParameters(int32_t ioHandle, const std::string& keyValuePairs) {
+ audio_io_handle_t ioHandleLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(ioHandle));
+ String8 keyValuePairsLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_string_view_String8(keyValuePairs));
+ return Status::fromStatusT(mDelegate->setParameters(ioHandleLegacy, keyValuePairsLegacy));
+}
+
+Status AudioFlingerServerAdapter::getParameters(int32_t ioHandle, const std::string& keys,
+ std::string* _aidl_return) {
+ audio_io_handle_t ioHandleLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(ioHandle));
+ String8 keysLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_string_view_String8(keys));
+ *_aidl_return = VALUE_OR_RETURN_BINDER(
+ legacy2aidl_String8_string(mDelegate->getParameters(ioHandleLegacy, keysLegacy)));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::registerClient(const sp<media::IAudioFlingerClient>& client) {
+ mDelegate->registerClient(client);
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::getInputBufferSize(int32_t sampleRate,
+ media::audio::common::AudioFormat format,
+ int32_t channelMask, int64_t* _aidl_return) {
+ uint32_t sampleRateLegacy = VALUE_OR_RETURN_BINDER(convertIntegral<uint32_t>(sampleRate));
+ audio_format_t formatLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_AudioFormat_audio_format_t(format));
+ audio_channel_mask_t channelMaskLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_channel_mask_t(channelMask));
+ size_t size = mDelegate->getInputBufferSize(sampleRateLegacy, formatLegacy, channelMaskLegacy);
+ *_aidl_return = VALUE_OR_RETURN_BINDER(convertIntegral<int64_t>(size));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::openOutput(const media::OpenOutputRequest& request,
+ media::OpenOutputResponse* _aidl_return) {
+ return Status::fromStatusT(mDelegate->openOutput(request, _aidl_return));
+}
+
+Status AudioFlingerServerAdapter::openDuplicateOutput(int32_t output1, int32_t output2,
+ int32_t* _aidl_return) {
+ audio_io_handle_t output1Legacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(output1));
+ audio_io_handle_t output2Legacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(output2));
+ audio_io_handle_t result = mDelegate->openDuplicateOutput(output1Legacy, output2Legacy);
+ *_aidl_return = VALUE_OR_RETURN_BINDER(legacy2aidl_audio_io_handle_t_int32_t(result));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::closeOutput(int32_t output) {
+ audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(output));
+ return Status::fromStatusT(mDelegate->closeOutput(outputLegacy));
+}
+
+Status AudioFlingerServerAdapter::suspendOutput(int32_t output) {
+ audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(output));
+ return Status::fromStatusT(mDelegate->suspendOutput(outputLegacy));
+}
+
+Status AudioFlingerServerAdapter::restoreOutput(int32_t output) {
+ audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(output));
+ return Status::fromStatusT(mDelegate->restoreOutput(outputLegacy));
+}
+
+Status AudioFlingerServerAdapter::openInput(const media::OpenInputRequest& request,
+ media::OpenInputResponse* _aidl_return) {
+ return Status::fromStatusT(mDelegate->openInput(request, _aidl_return));
+}
+
+Status AudioFlingerServerAdapter::closeInput(int32_t input) {
+ audio_io_handle_t inputLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(input));
+ return Status::fromStatusT(mDelegate->closeInput(inputLegacy));
+}
+
+Status AudioFlingerServerAdapter::invalidateStream(media::AudioStreamType stream) {
+ audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
+ return Status::fromStatusT(mDelegate->invalidateStream(streamLegacy));
+}
+
+Status AudioFlingerServerAdapter::setVoiceVolume(float volume) {
+ return Status::fromStatusT(mDelegate->setVoiceVolume(volume));
+}
+
+Status
+AudioFlingerServerAdapter::getRenderPosition(int32_t output, media::RenderPosition* _aidl_return) {
+ audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(output));
+ uint32_t halFramesLegacy;
+ uint32_t dspFramesLegacy;
+ RETURN_BINDER_IF_ERROR(
+ mDelegate->getRenderPosition(&halFramesLegacy, &dspFramesLegacy, outputLegacy));
+ _aidl_return->halFrames = VALUE_OR_RETURN_BINDER(convertIntegral<int32_t>(halFramesLegacy));
+ _aidl_return->dspFrames = VALUE_OR_RETURN_BINDER(convertIntegral<int32_t>(dspFramesLegacy));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::getInputFramesLost(int32_t ioHandle, int32_t* _aidl_return) {
+ audio_io_handle_t ioHandleLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(ioHandle));
+ uint32_t result = mDelegate->getInputFramesLost(ioHandleLegacy);
+ *_aidl_return = VALUE_OR_RETURN_BINDER(convertIntegral<int32_t>(result));
+ return Status::ok();
+}
+
+Status
+AudioFlingerServerAdapter::newAudioUniqueId(media::AudioUniqueIdUse use, int32_t* _aidl_return) {
+ audio_unique_id_use_t useLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_AudioUniqueIdUse_audio_unique_id_use_t(use));
+ audio_unique_id_t result = mDelegate->newAudioUniqueId(useLegacy);
+ *_aidl_return = VALUE_OR_RETURN_BINDER(legacy2aidl_audio_unique_id_t_int32_t(result));
+ return Status::ok();
+}
+
+Status
+AudioFlingerServerAdapter::acquireAudioSessionId(int32_t audioSession, int32_t pid, int32_t uid) {
+ audio_session_t audioSessionLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_session_t(audioSession));
+ pid_t pidLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_int32_t_pid_t(pid));
+ uid_t uidLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_int32_t_uid_t(uid));
+ mDelegate->acquireAudioSessionId(audioSessionLegacy, pidLegacy, uidLegacy);
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::releaseAudioSessionId(int32_t audioSession, int32_t pid) {
+ audio_session_t audioSessionLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_session_t(audioSession));
+ pid_t pidLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_int32_t_pid_t(pid));
+ mDelegate->releaseAudioSessionId(audioSessionLegacy, pidLegacy);
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::queryNumberEffects(int32_t* _aidl_return) {
+ uint32_t result;
+ RETURN_BINDER_IF_ERROR(mDelegate->queryNumberEffects(&result));
+    *_aidl_return = VALUE_OR_RETURN_BINDER(convertIntegral<int32_t>(result));
+ return Status::ok();
+}
+
+Status
+AudioFlingerServerAdapter::queryEffect(int32_t index, media::EffectDescriptor* _aidl_return) {
+ uint32_t indexLegacy = VALUE_OR_RETURN_BINDER(convertIntegral<uint32_t>(index));
+ effect_descriptor_t result;
+ RETURN_BINDER_IF_ERROR(mDelegate->queryEffect(indexLegacy, &result));
+ *_aidl_return = VALUE_OR_RETURN_BINDER(
+ legacy2aidl_effect_descriptor_t_EffectDescriptor(result));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::getEffectDescriptor(const media::AudioUuid& effectUUID,
+ const media::AudioUuid& typeUUID,
+ int32_t preferredTypeFlag,
+ media::EffectDescriptor* _aidl_return) {
+ effect_uuid_t effectUuidLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_AudioUuid_audio_uuid_t(effectUUID));
+ effect_uuid_t typeUuidLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_AudioUuid_audio_uuid_t(typeUUID));
+ uint32_t preferredTypeFlagLegacy = VALUE_OR_RETURN_BINDER(
+ convertReinterpret<uint32_t>(preferredTypeFlag));
+ effect_descriptor_t result;
+ RETURN_BINDER_IF_ERROR(mDelegate->getEffectDescriptor(&effectUuidLegacy, &typeUuidLegacy,
+ preferredTypeFlagLegacy, &result));
+ *_aidl_return = VALUE_OR_RETURN_BINDER(
+ legacy2aidl_effect_descriptor_t_EffectDescriptor(result));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::createEffect(const media::CreateEffectRequest& request,
+ media::CreateEffectResponse* _aidl_return) {
+ return Status::fromStatusT(mDelegate->createEffect(request, _aidl_return));
+}
+
+Status
+AudioFlingerServerAdapter::moveEffects(int32_t session, int32_t srcOutput, int32_t dstOutput) {
+ audio_session_t sessionLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_session_t(session));
+ audio_io_handle_t srcOutputLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(srcOutput));
+ audio_io_handle_t dstOutputLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(dstOutput));
+ return Status::fromStatusT(
+ mDelegate->moveEffects(sessionLegacy, srcOutputLegacy, dstOutputLegacy));
+}
+
+Status AudioFlingerServerAdapter::setEffectSuspended(int32_t effectId, int32_t sessionId,
+ bool suspended) {
+ int effectIdLegacy = VALUE_OR_RETURN_BINDER(convertReinterpret<int>(effectId));
+ audio_session_t sessionIdLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_session_t(sessionId));
+ mDelegate->setEffectSuspended(effectIdLegacy, sessionIdLegacy, suspended);
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::loadHwModule(const std::string& name, int32_t* _aidl_return) {
+ audio_module_handle_t result = mDelegate->loadHwModule(name.c_str());
+ *_aidl_return = VALUE_OR_RETURN_BINDER(legacy2aidl_audio_module_handle_t_int32_t(result));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::getPrimaryOutputSamplingRate(int32_t* _aidl_return) {
+ *_aidl_return = VALUE_OR_RETURN_BINDER(
+ convertIntegral<int32_t>(mDelegate->getPrimaryOutputSamplingRate()));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::getPrimaryOutputFrameCount(int64_t* _aidl_return) {
+ *_aidl_return = VALUE_OR_RETURN_BINDER(
+ convertIntegral<int64_t>(mDelegate->getPrimaryOutputFrameCount()));
+ return Status::ok();
+
+}
+
+Status AudioFlingerServerAdapter::setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) {
+ return Status::fromStatusT(mDelegate->setLowRamDevice(isLowRamDevice, totalMemory));
+}
+
+Status AudioFlingerServerAdapter::getAudioPort(const media::AudioPort& port,
+ media::AudioPort* _aidl_return) {
+ audio_port_v7 portLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioPort_audio_port_v7(port));
+ RETURN_BINDER_IF_ERROR(mDelegate->getAudioPort(&portLegacy));
+ *_aidl_return = VALUE_OR_RETURN_BINDER(legacy2aidl_audio_port_v7_AudioPort(portLegacy));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::createAudioPatch(const media::AudioPatch& patch,
+ int32_t* _aidl_return) {
+ audio_patch patchLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioPatch_audio_patch(patch));
+ audio_patch_handle_t handleLegacy;
+ RETURN_BINDER_IF_ERROR(mDelegate->createAudioPatch(&patchLegacy, &handleLegacy));
+ *_aidl_return = VALUE_OR_RETURN_BINDER(legacy2aidl_audio_patch_handle_t_int32_t(handleLegacy));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::releaseAudioPatch(int32_t handle) {
+ audio_patch_handle_t handleLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_patch_handle_t(handle));
+ return Status::fromStatusT(mDelegate->releaseAudioPatch(handleLegacy));
+}
+
+Status AudioFlingerServerAdapter::listAudioPatches(int32_t maxCount,
+ std::vector<media::AudioPatch>* _aidl_return) {
+ unsigned int count = VALUE_OR_RETURN_BINDER(convertIntegral<unsigned int>(maxCount));
+ count = std::min(count, static_cast<unsigned int>(MAX_ITEMS_PER_LIST));
+ std::unique_ptr<audio_patch[]> patchesLegacy(new audio_patch[count]);
+ RETURN_BINDER_IF_ERROR(mDelegate->listAudioPatches(&count, patchesLegacy.get()));
+ RETURN_BINDER_IF_ERROR(convertRange(&patchesLegacy[0],
+ &patchesLegacy[count],
+ std::back_inserter(*_aidl_return),
+ legacy2aidl_audio_patch_AudioPatch));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::setAudioPortConfig(const media::AudioPortConfig& config) {
+ audio_port_config configLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_AudioPortConfig_audio_port_config(config));
+ return Status::fromStatusT(mDelegate->setAudioPortConfig(&configLegacy));
+}
+
+Status AudioFlingerServerAdapter::getAudioHwSyncForSession(int32_t sessionId,
+ int32_t* _aidl_return) {
+ audio_session_t sessionIdLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_session_t(sessionId));
+ audio_hw_sync_t result = mDelegate->getAudioHwSyncForSession(sessionIdLegacy);
+ *_aidl_return = VALUE_OR_RETURN_BINDER(legacy2aidl_audio_hw_sync_t_int32_t(result));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::systemReady() {
+ return Status::fromStatusT(mDelegate->systemReady());
+}
+
+Status AudioFlingerServerAdapter::frameCountHAL(int32_t ioHandle, int64_t* _aidl_return) {
+ audio_io_handle_t ioHandleLegacy = VALUE_OR_RETURN_BINDER(
+ aidl2legacy_int32_t_audio_io_handle_t(ioHandle));
+ size_t result = mDelegate->frameCountHAL(ioHandleLegacy);
+ *_aidl_return = VALUE_OR_RETURN_BINDER(convertIntegral<int64_t>(result));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::getMicrophones(
+ std::vector<media::MicrophoneInfoData>* _aidl_return) {
+ std::vector<media::MicrophoneInfo> resultLegacy;
+ RETURN_BINDER_IF_ERROR(mDelegate->getMicrophones(&resultLegacy));
+ *_aidl_return = VALUE_OR_RETURN_BINDER(convertContainer<std::vector<media::MicrophoneInfoData>>(
+ resultLegacy, media::legacy2aidl_MicrophoneInfo));
+ return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::setAudioHalPids(const std::vector<int32_t>& pids) {
+ std::vector<pid_t> pidsLegacy = VALUE_OR_RETURN_BINDER(
+ convertContainer<std::vector<pid_t>>(pids, aidl2legacy_int32_t_pid_t));
+ RETURN_BINDER_IF_ERROR(mDelegate->setAudioHalPids(pidsLegacy));
+ return Status::ok();
+}
} // namespace android
diff --git a/media/libaudioclient/aidl/android/media/DeviceDescriptorBase.aidl b/media/libaudioclient/aidl/android/media/AudioMixLatencyClass.aidl
similarity index 62%
copy from media/libaudioclient/aidl/android/media/DeviceDescriptorBase.aidl
copy to media/libaudioclient/aidl/android/media/AudioMixLatencyClass.aidl
index aa0f149..d70b364 100644
--- a/media/libaudioclient/aidl/android/media/DeviceDescriptorBase.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioMixLatencyClass.aidl
@@ -13,22 +13,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package android.media;
-import android.media.AudioPort;
-import android.media.AudioPortConfig;
-import android.media.AudioDevice;
-
/**
* {@hide}
*/
-parcelable DeviceDescriptorBase {
- AudioPort port;
- AudioPortConfig portConfig;
- AudioDevice device;
- /** Bitmask, indexed by AudioEncapsulationMode. */
- int encapsulationModes;
- /** Bitmask, indexed by AudioEncapsulationMetadataType. */
- int encapsulationMetadataTypes;
+@Backing(type="int")
+enum AudioMixLatencyClass {
+ LOW = 0,
+ NORMAL = 1,
}
diff --git a/media/libaudioclient/aidl/android/media/AudioPort.aidl b/media/libaudioclient/aidl/android/media/AudioPort.aidl
index 1aa532b..123aeb0 100644
--- a/media/libaudioclient/aidl/android/media/AudioPort.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPort.aidl
@@ -17,6 +17,8 @@
package android.media;
import android.media.AudioGain;
+import android.media.AudioPortConfig;
+import android.media.AudioPortExt;
import android.media.AudioPortRole;
import android.media.AudioPortType;
import android.media.AudioProfile;
@@ -25,11 +27,18 @@
* {@hide}
*/
parcelable AudioPort {
- /** Gain controllers. */
- AudioGain[] gains;
- @utf8InCpp String name;
- AudioPortType type;
+ /** Port unique ID. Interpreted as audio_port_handle_t. */
+ int id;
+ /** Sink or source. */
AudioPortRole role;
+ /** Device, mix ... */
+ AudioPortType type;
+ @utf8InCpp String name;
/** AudioProfiles supported by this port (format, Rates, Channels). */
AudioProfile[] profiles;
+ /** Gain controllers. */
+ AudioGain[] gains;
+ /** Current audio port configuration. */
+ AudioPortConfig activeConfig;
+ AudioPortExt ext;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigExt.aidl
index 38da4f5..5d635b6 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigExt.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortConfigExt.aidl
@@ -29,7 +29,7 @@
* TODO(ytai): replace with the canonical representation for an empty union, as soon as it is
* established.
*/
- boolean nothing;
+ boolean unspecified;
/** Device specific info. */
AudioPortConfigDeviceExt device;
/** Mix specific info. */
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExtUseCase.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigMixExtUseCase.aidl
index 9e5e081..c61f044 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigMixExtUseCase.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortConfigMixExtUseCase.aidl
@@ -29,7 +29,7 @@
* TODO(ytai): replace with the canonical representation for an empty union, as soon as it is
* established.
*/
- boolean nothing;
+ boolean unspecified;
/** This to be set if the containing config has the AudioPortRole::SOURCE role. */
AudioStreamType stream;
/** This to be set if the containing config has the AudioPortRole::SINK role. */
diff --git a/media/libaudioclient/aidl/android/media/DeviceDescriptorBase.aidl b/media/libaudioclient/aidl/android/media/AudioPortDeviceExt.aidl
similarity index 85%
rename from media/libaudioclient/aidl/android/media/DeviceDescriptorBase.aidl
rename to media/libaudioclient/aidl/android/media/AudioPortDeviceExt.aidl
index aa0f149..b758f23 100644
--- a/media/libaudioclient/aidl/android/media/DeviceDescriptorBase.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortDeviceExt.aidl
@@ -16,16 +16,14 @@
package android.media;
-import android.media.AudioPort;
-import android.media.AudioPortConfig;
import android.media.AudioDevice;
/**
* {@hide}
*/
-parcelable DeviceDescriptorBase {
- AudioPort port;
- AudioPortConfig portConfig;
+parcelable AudioPortDeviceExt {
+ /** Module the device is attached to. Interpreted as audio_module_handle_t. */
+ int hwModule;
AudioDevice device;
/** Bitmask, indexed by AudioEncapsulationMode. */
int encapsulationModes;
diff --git a/media/libaudioclient/aidl/android/media/AudioPortExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortExt.aidl
new file mode 100644
index 0000000..453784b
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/AudioPortExt.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.AudioPortDeviceExt;
+import android.media.AudioPortMixExt;
+import android.media.AudioPortSessionExt;
+
+/**
+ * {@hide}
+ */
+union AudioPortExt {
+ /**
+ * This represents an empty union. Value is ignored.
+ * TODO(ytai): replace with the canonical representation for an empty union, as soon as it is
+ * established.
+ */
+ boolean unspecified;
+ /** Device specific info. */
+ AudioPortDeviceExt device;
+ /** Mix specific info. */
+ AudioPortMixExt mix;
+ /** Session specific info. */
+ AudioPortSessionExt session;
+}
diff --git a/media/libaudioclient/aidl/android/media/DeviceDescriptorBase.aidl b/media/libaudioclient/aidl/android/media/AudioPortMixExt.aidl
similarity index 62%
copy from media/libaudioclient/aidl/android/media/DeviceDescriptorBase.aidl
copy to media/libaudioclient/aidl/android/media/AudioPortMixExt.aidl
index aa0f149..62cdb8e 100644
--- a/media/libaudioclient/aidl/android/media/DeviceDescriptorBase.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortMixExt.aidl
@@ -16,19 +16,16 @@
package android.media;
-import android.media.AudioPort;
-import android.media.AudioPortConfig;
-import android.media.AudioDevice;
+import android.media.AudioMixLatencyClass;
/**
* {@hide}
*/
-parcelable DeviceDescriptorBase {
- AudioPort port;
- AudioPortConfig portConfig;
- AudioDevice device;
- /** Bitmask, indexed by AudioEncapsulationMode. */
- int encapsulationModes;
- /** Bitmask, indexed by AudioEncapsulationMetadataType. */
- int encapsulationMetadataTypes;
+parcelable AudioPortMixExt {
+ /** Module the stream is attached to. Interpreted as audio_module_handle_t. */
+ int hwModule;
+ /** I/O handle of the input/output stream. Interpreted as audio_io_handle_t. */
+ int handle;
+ /** Latency class */
+ AudioMixLatencyClass latencyClass;
}
diff --git a/media/libaudioclient/aidl/android/media/DeviceDescriptorBase.aidl b/media/libaudioclient/aidl/android/media/AudioPortSessionExt.aidl
similarity index 62%
copy from media/libaudioclient/aidl/android/media/DeviceDescriptorBase.aidl
copy to media/libaudioclient/aidl/android/media/AudioPortSessionExt.aidl
index aa0f149..dbca168 100644
--- a/media/libaudioclient/aidl/android/media/DeviceDescriptorBase.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortSessionExt.aidl
@@ -16,19 +16,10 @@
package android.media;
-import android.media.AudioPort;
-import android.media.AudioPortConfig;
-import android.media.AudioDevice;
-
/**
* {@hide}
*/
-parcelable DeviceDescriptorBase {
- AudioPort port;
- AudioPortConfig portConfig;
- AudioDevice device;
- /** Bitmask, indexed by AudioEncapsulationMode. */
- int encapsulationModes;
- /** Bitmask, indexed by AudioEncapsulationMetadataType. */
- int encapsulationMetadataTypes;
+parcelable AudioPortSessionExt {
+ /** Audio session. Interpreted as audio_session_t. */
+ int session;
}
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
new file mode 100644
index 0000000..e63f391
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.AudioMode;
+import android.media.AudioPatch;
+import android.media.AudioPort;
+import android.media.AudioPortConfig;
+import android.media.AudioStreamType;
+import android.media.AudioUniqueIdUse;
+import android.media.AudioUuid;
+import android.media.CreateEffectRequest;
+import android.media.CreateEffectResponse;
+import android.media.CreateRecordRequest;
+import android.media.CreateRecordResponse;
+import android.media.CreateTrackRequest;
+import android.media.CreateTrackResponse;
+import android.media.OpenInputRequest;
+import android.media.OpenInputResponse;
+import android.media.OpenOutputRequest;
+import android.media.OpenOutputResponse;
+import android.media.EffectDescriptor;
+import android.media.IAudioFlingerClient;
+import android.media.IAudioRecord;
+import android.media.IAudioTrack;
+import android.media.MicrophoneInfoData;
+import android.media.RenderPosition;
+import android.media.audio.common.AudioFormat;
+
+/**
+ * {@hide}
+ */
+interface IAudioFlingerService {
+ /**
+ * Creates an audio track and registers it with AudioFlinger, or null if the track cannot be
+ * created.
+ */
+ CreateTrackResponse createTrack(in CreateTrackRequest request);
+
+ CreateRecordResponse createRecord(in CreateRecordRequest request);
+
+ // FIXME Surprisingly, format/latency don't work for input handles
+
+ /**
+ * Queries the audio hardware state. This state never changes, and therefore can be cached.
+ */
+ int sampleRate(int /* audio_io_handle_t */ ioHandle);
+
+ AudioFormat format(int /* audio_io_handle_t */ output);
+
+ long frameCount(int /* audio_io_handle_t */ ioHandle);
+
+ /**
+ * Return the estimated latency in milliseconds.
+ */
+ int latency(int /* audio_io_handle_t */ output);
+
+ /*
+ * Sets/gets the audio hardware state. This will probably be used by
+ * the preference panel, mostly.
+ */
+ void setMasterVolume(float value);
+ void setMasterMute(boolean muted);
+
+ float masterVolume();
+ boolean masterMute();
+
+ void setMasterBalance(float balance);
+ float getMasterBalance();
+
+ /*
+ * Set/gets stream type state. This will probably be used by
+ * the preference panel, mostly.
+ */
+ void setStreamVolume(AudioStreamType stream, float value, int /* audio_io_handle_t */ output);
+ void setStreamMute(AudioStreamType stream, boolean muted);
+ float streamVolume(AudioStreamType stream, int /* audio_io_handle_t */ output);
+ boolean streamMute(AudioStreamType stream);
+
+ // set audio mode.
+ void setMode(AudioMode mode);
+
+ // mic mute/state
+ void setMicMute(boolean state);
+ boolean getMicMute();
+ void setRecordSilenced(int /* audio_port_handle_t */ portId,
+ boolean silenced);
+
+ void setParameters(int /* audio_io_handle_t */ ioHandle,
+ @utf8InCpp String keyValuePairs);
+ @utf8InCpp String getParameters(int /* audio_io_handle_t */ ioHandle,
+ @utf8InCpp String keys);
+
+ // Register an object to receive audio input/output change and track notifications.
+ // For a given calling pid, AudioFlinger disregards any registrations after the first.
+ // Thus the IAudioFlingerClient must be a singleton per process.
+ void registerClient(IAudioFlingerClient client);
+
+ // Retrieve the audio recording buffer size in bytes.
+ // FIXME This API assumes a route, and so should be deprecated.
+ long getInputBufferSize(int sampleRate,
+ AudioFormat format,
+ int /* audio_channel_mask_t */ channelMask);
+
+ OpenOutputResponse openOutput(in OpenOutputRequest request);
+ int /* audio_io_handle_t */ openDuplicateOutput(int /* audio_io_handle_t */ output1,
+ int /* audio_io_handle_t */ output2);
+ void closeOutput(int /* audio_io_handle_t */ output);
+ void suspendOutput(int /* audio_io_handle_t */ output);
+ void restoreOutput(int /* audio_io_handle_t */ output);
+
+ OpenInputResponse openInput(in OpenInputRequest request);
+ void closeInput(int /* audio_io_handle_t */ input);
+
+ void invalidateStream(AudioStreamType stream);
+
+ void setVoiceVolume(float volume);
+
+ RenderPosition getRenderPosition(int /* audio_io_handle_t */ output);
+
+ int getInputFramesLost(int /* audio_io_handle_t */ ioHandle);
+
+ int /* audio_unique_id_t */ newAudioUniqueId(AudioUniqueIdUse use);
+
+ void acquireAudioSessionId(int /* audio_session_t */ audioSession,
+ int /* pid_t */ pid,
+ int /* uid_t */ uid);
+ void releaseAudioSessionId(int /* audio_session_t */ audioSession,
+ int /* pid_t */ pid);
+
+ int queryNumberEffects();
+
+ EffectDescriptor queryEffect(int index);
+
+ /** preferredTypeFlag is interpreted as a uint32_t with the "effect flag" format. */
+ EffectDescriptor getEffectDescriptor(in AudioUuid effectUUID,
+ in AudioUuid typeUUID,
+ int preferredTypeFlag);
+
+ CreateEffectResponse createEffect(in CreateEffectRequest request);
+
+ void moveEffects(int /* audio_session_t */ session,
+ int /* audio_io_handle_t */ srcOutput,
+ int /* audio_io_handle_t */ dstOutput);
+
+ void setEffectSuspended(int effectId,
+ int /* audio_session_t */ sessionId,
+ boolean suspended);
+
+ int /* audio_module_handle_t */ loadHwModule(@utf8InCpp String name);
+
+ // helpers for android.media.AudioManager.getProperty(), see description there for meaning
+ // FIXME move these APIs to AudioPolicy to permit a more accurate implementation
+ // that looks on primary device for a stream with fast flag, primary flag, or first one.
+ int getPrimaryOutputSamplingRate();
+ long getPrimaryOutputFrameCount();
+
+ // Intended for AudioService to inform AudioFlinger of device's low RAM attribute,
+ // and should be called at most once. For a definition of what "low RAM" means, see
+ // android.app.ActivityManager.isLowRamDevice(). The totalMemory parameter
+ // is obtained from android.app.ActivityManager.MemoryInfo.totalMem.
+ void setLowRamDevice(boolean isLowRamDevice, long totalMemory);
+
+ /* Get attributes for a given audio port */
+ AudioPort getAudioPort(in AudioPort port);
+
+ /* Create an audio patch between several source and sink ports */
+ int /* audio_patch_handle_t */ createAudioPatch(in AudioPatch patch);
+
+ /* Release an audio patch */
+ void releaseAudioPatch(int /* audio_patch_handle_t */ handle);
+
+ /* List existing audio patches */
+ AudioPatch[] listAudioPatches(int maxCount);
+ /* Set audio port configuration */
+ void setAudioPortConfig(in AudioPortConfig config);
+
+ /* Get the HW synchronization source used for an audio session */
+ int /* audio_hw_sync_t */ getAudioHwSyncForSession(int /* audio_session_t */ sessionId);
+
+ /* Indicate JAVA services are ready (scheduling, power management ...) */
+ oneway void systemReady();
+
+ // Returns the number of frames per audio HAL buffer.
+ long frameCountHAL(int /* audio_io_handle_t */ ioHandle);
+
+ /* List available microphones and their characteristics */
+ MicrophoneInfoData[] getMicrophones();
+
+ void setAudioHalPids(in int[] /* pid_t[] */ pids);
+}
diff --git a/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl b/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
index 4518adb..06b12e9 100644
--- a/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
@@ -17,7 +17,7 @@
package android.media;
import android.media.AudioConfig;
-import android.media.DeviceDescriptorBase;
+import android.media.AudioPort;
/**
* {@hide}
@@ -26,7 +26,8 @@
/** Interpreted as audio_module_handle_t. */
int module;
AudioConfig config;
- DeviceDescriptorBase device;
+ /** Type must be DEVICE. */
+ AudioPort device;
/** Bitmask, indexed by AudioOutputFlag. */
int flags;
}
diff --git a/media/libaudioclient/aidl/android/media/DeviceDescriptorBase.aidl b/media/libaudioclient/aidl/android/media/RenderPosition.aidl
similarity index 62%
copy from media/libaudioclient/aidl/android/media/DeviceDescriptorBase.aidl
copy to media/libaudioclient/aidl/android/media/RenderPosition.aidl
index aa0f149..98dc17a 100644
--- a/media/libaudioclient/aidl/android/media/DeviceDescriptorBase.aidl
+++ b/media/libaudioclient/aidl/android/media/RenderPosition.aidl
@@ -16,19 +16,10 @@
package android.media;
-import android.media.AudioPort;
-import android.media.AudioPortConfig;
-import android.media.AudioDevice;
-
/**
* {@hide}
*/
-parcelable DeviceDescriptorBase {
- AudioPort port;
- AudioPortConfig portConfig;
- AudioDevice device;
- /** Bitmask, indexed by AudioEncapsulationMode. */
- int encapsulationModes;
- /** Bitmask, indexed by AudioEncapsulationMetadataType. */
- int encapsulationMetadataTypes;
+parcelable RenderPosition {
+ int halFrames;
+ int dspFrames;
}
diff --git a/media/libaudioclient/include/media/AidlConversion.h b/media/libaudioclient/include/media/AidlConversion.h
index 894e56e..a6e5e2e 100644
--- a/media/libaudioclient/include/media/AidlConversion.h
+++ b/media/libaudioclient/include/media/AidlConversion.h
@@ -28,13 +28,23 @@
#include <android/media/AudioEncapsulationMode.h>
#include <android/media/AudioEncapsulationMetadataType.h>
#include <android/media/AudioFlag.h>
+#include <android/media/AudioGain.h>
#include <android/media/AudioGainMode.h>
#include <android/media/AudioInputFlags.h>
#include <android/media/AudioIoConfigEvent.h>
#include <android/media/AudioIoDescriptor.h>
+#include <android/media/AudioMixLatencyClass.h>
+#include <android/media/AudioMode.h>
#include <android/media/AudioOutputFlags.h>
+#include <android/media/AudioPort.h>
#include <android/media/AudioPortConfigType.h>
+#include <android/media/AudioPortDeviceExt.h>
+#include <android/media/AudioPortExt.h>
+#include <android/media/AudioPortMixExt.h>
+#include <android/media/AudioPortSessionExt.h>
+#include <android/media/AudioProfile.h>
#include <android/media/AudioTimestampInternal.h>
+#include <android/media/AudioUniqueIdUse.h>
#include <android/media/EffectDescriptor.h>
#include <android/media/SharedFileRegion.h>
@@ -67,6 +77,9 @@
ConversionResult<audio_unique_id_t> aidl2legacy_int32_t_audio_unique_id_t(int32_t aidl);
ConversionResult<int32_t> legacy2aidl_audio_unique_id_t_int32_t(audio_unique_id_t legacy);
+ConversionResult<audio_hw_sync_t> aidl2legacy_int32_t_audio_hw_sync_t(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_hw_sync_t_int32_t(audio_hw_sync_t legacy);
+
// The legacy enum is unnamed. Thus, we use int.
ConversionResult<int> aidl2legacy_AudioPortConfigType(media::AudioPortConfigType aidl);
// The legacy enum is unnamed. Thus, we use int.
@@ -110,11 +123,13 @@
ConversionResult<media::audio::common::AudioFormat> legacy2aidl_audio_format_t_AudioFormat(
audio_format_t legacy);
-ConversionResult<int> aidl2legacy_AudioGainMode_int(media::AudioGainMode aidl);
-ConversionResult<media::AudioGainMode> legacy2aidl_int_AudioGainMode(int legacy);
+ConversionResult<audio_gain_mode_t>
+aidl2legacy_AudioGainMode_audio_gain_mode_t(media::AudioGainMode aidl);
+ConversionResult<media::AudioGainMode>
+legacy2aidl_audio_gain_mode_t_AudioGainMode(audio_gain_mode_t legacy);
-ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t(audio_gain_mode_t legacy);
+ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_mask_int32_t(audio_gain_mode_t legacy);
ConversionResult<audio_devices_t> aidl2legacy_int32_t_audio_devices_t(int32_t aidl);
ConversionResult<int32_t> legacy2aidl_audio_devices_t_int32_t(audio_devices_t legacy);
@@ -279,4 +294,51 @@
ConversionResult<int32_t>
legacy2aidl_AudioEncapsulationMetadataType_mask(uint32_t legacy);
+ConversionResult<audio_mix_latency_class_t>
+aidl2legacy_AudioMixLatencyClass_audio_mix_latency_class_t(
+ media::AudioMixLatencyClass aidl);
+ConversionResult<media::AudioMixLatencyClass>
+legacy2aidl_audio_mix_latency_class_t_AudioMixLatencyClass(
+ audio_mix_latency_class_t legacy);
+
+ConversionResult<audio_port_device_ext>
+aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(const media::AudioPortDeviceExt& aidl);
+ConversionResult<media::AudioPortDeviceExt>
+legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(const audio_port_device_ext& legacy);
+
+ConversionResult<audio_port_mix_ext>
+aidl2legacy_AudioPortMixExt_audio_port_mix_ext(const media::AudioPortMixExt& aidl);
+ConversionResult<media::AudioPortMixExt>
+legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy);
+
+ConversionResult<audio_port_session_ext>
+aidl2legacy_AudioPortSessionExt_audio_port_session_ext(const media::AudioPortSessionExt& aidl);
+ConversionResult<media::AudioPortSessionExt>
+legacy2aidl_audio_port_session_ext_AudioPortSessionExt(const audio_port_session_ext& legacy);
+
+ConversionResult<audio_profile>
+aidl2legacy_AudioProfile_audio_profile(const media::AudioProfile& aidl);
+ConversionResult<media::AudioProfile>
+legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy);
+
+ConversionResult<audio_gain>
+aidl2legacy_AudioGain_audio_gain(const media::AudioGain& aidl);
+ConversionResult<media::AudioGain>
+legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy);
+
+ConversionResult<audio_port_v7>
+aidl2legacy_AudioPort_audio_port_v7(const media::AudioPort& aidl);
+ConversionResult<media::AudioPort>
+legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy);
+
+ConversionResult<audio_mode_t>
+aidl2legacy_AudioMode_audio_mode_t(media::AudioMode aidl);
+ConversionResult<media::AudioMode>
+legacy2aidl_audio_mode_t_AudioMode(audio_mode_t legacy);
+
+ConversionResult<audio_unique_id_use_t>
+aidl2legacy_AudioUniqueIdUse_audio_unique_id_use_t(media::AudioUniqueIdUse aidl);
+ConversionResult<media::AudioUniqueIdUse>
+legacy2aidl_audio_unique_id_use_t_AudioUniqueIdUse(audio_unique_id_use_t legacy);
+
} // namespace android
diff --git a/media/libaudioclient/include/media/AidlConversionUtil.h b/media/libaudioclient/include/media/AidlConversionUtil.h
index 00e5ff2..6bfb743 100644
--- a/media/libaudioclient/include/media/AidlConversionUtil.h
+++ b/media/libaudioclient/include/media/AidlConversionUtil.h
@@ -82,6 +82,20 @@
}
/**
+ * A generic template that helps convert containers of convertible types, using iterators.
+ */
+template<typename InputIterator, typename OutputIterator, typename Func>
+status_t convertRange(InputIterator start,
+ InputIterator end,
+ OutputIterator out,
+ const Func& itemConversion) {
+ for (InputIterator iter = start; iter != end; ++iter, ++out) {
+ *out = VALUE_OR_RETURN_STATUS(itemConversion(*iter));
+ }
+ return OK;
+}
+
+/**
* A generic template that helps convert containers of convertible types.
*/
template<typename OutputContainer, typename InputContainer, typename Func>
@@ -95,4 +109,27 @@
return output;
}
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Utilities for working with AIDL unions.
+// UNION_GET(obj, fieldname) returns a ConversionResult<T> containing either the strongly-typed
+// value of the respective field, or BAD_VALUE if the union is not set to the requested field.
+// UNION_SET(obj, fieldname, value) sets the requested field to the given value.
+
+template<typename T, typename T::Tag tag>
+using UnionFieldType = std::decay_t<decltype(std::declval<T>().template get<tag>())>;
+
+template<typename T, typename T::Tag tag>
+ConversionResult<UnionFieldType<T, tag>> unionGetField(const T& u) {
+ if (u.getTag() != tag) {
+ return base::unexpected(BAD_VALUE);
+ }
+ return u.template get<tag>();
+}
+
+#define UNION_GET(u, field) \
+ unionGetField<std::decay_t<decltype(u)>, std::decay_t<decltype(u)>::Tag::field>(u)
+
+#define UNION_SET(u, field, value) \
+ (u).set<std::decay_t<decltype(u)>::Tag::field>(value)
+
} // namespace android
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 11d341e..9a8014d 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -24,8 +24,6 @@
#include <utils/RefBase.h>
#include <utils/Errors.h>
#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
#include <media/AidlConversion.h>
#include <media/AudioClient.h>
#include <media/DeviceDescriptorBase.h>
@@ -37,6 +35,8 @@
#include <string>
#include <vector>
+#include <android/media/BnAudioFlingerService.h>
+#include <android/media/BpAudioFlingerService.h>
#include "android/media/CreateEffectRequest.h"
#include "android/media/CreateEffectResponse.h"
#include "android/media/CreateRecordRequest.h"
@@ -58,10 +58,11 @@
// ----------------------------------------------------------------------------
-class IAudioFlinger : public IInterface
-{
+class IAudioFlinger : public RefBase {
public:
- DECLARE_META_INTERFACE(AudioFlinger);
+ static constexpr char DEFAULT_SERVICE_NAME[] = "media.audio_flinger";
+
+ virtual ~IAudioFlinger() = default;
/* CreateTrackInput contains all input arguments sent by AudioTrack to AudioFlinger
* when calling createTrack() including arguments that will be updated by AudioFlinger
@@ -162,7 +163,8 @@
sp<media::IAudioRecord> audioRecord;
ConversionResult<media::CreateRecordResponse> toAidl() const;
- static ConversionResult<CreateRecordOutput> fromAidl(const media::CreateRecordResponse& aidl);
+ static ConversionResult<CreateRecordOutput>
+ fromAidl(const media::CreateRecordResponse& aidl);
};
/* create an audio track and registers it with AudioFlinger.
@@ -300,10 +302,6 @@
// is obtained from android.app.ActivityManager.MemoryInfo.totalMem.
virtual status_t setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) = 0;
- /* List available audio ports and their attributes */
- virtual status_t listAudioPorts(unsigned int *num_ports,
- struct audio_port *ports) = 0;
-
/* Get attributes for a given audio port */
virtual status_t getAudioPort(struct audio_port_v7 *port) = 0;
@@ -335,22 +333,282 @@
virtual status_t setAudioHalPids(const std::vector<pid_t>& pids) = 0;
};
-
-// ----------------------------------------------------------------------------
-
-class BnAudioFlinger : public BnInterface<IAudioFlinger>
-{
+/**
+ * A client-side adapter, wrapping an IAudioFlingerService instance and presenting it as an
+ * IAudioFlinger. Intended to be used by legacy client code that was written against IAudioFlinger,
+ * before IAudioFlingerService was introduced as an AIDL service.
+ * New clients should not use this adapter, but rather IAudioFlingerService directly, via
+ * BpAudioFlingerService.
+ */
+class AudioFlingerClientAdapter : public IAudioFlinger {
public:
- virtual status_t onTransact( uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
+ explicit AudioFlingerClientAdapter(const sp<media::IAudioFlingerService> delegate);
- // Requests media.log to start merging log buffers
- virtual void requestLogMerge() = 0;
+ status_t createTrack(const media::CreateTrackRequest& input,
+ media::CreateTrackResponse& output) override;
+ status_t createRecord(const media::CreateRecordRequest& input,
+ media::CreateRecordResponse& output) override;
+ uint32_t sampleRate(audio_io_handle_t ioHandle) const override;
+ audio_format_t format(audio_io_handle_t output) const override;
+ size_t frameCount(audio_io_handle_t ioHandle) const override;
+ uint32_t latency(audio_io_handle_t output) const override;
+ status_t setMasterVolume(float value) override;
+ status_t setMasterMute(bool muted) override;
+ float masterVolume() const override;
+ bool masterMute() const override;
+ status_t setMasterBalance(float balance) override;
+ status_t getMasterBalance(float* balance) const override;
+ status_t setStreamVolume(audio_stream_type_t stream, float value,
+ audio_io_handle_t output) override;
+ status_t setStreamMute(audio_stream_type_t stream, bool muted) override;
+ float streamVolume(audio_stream_type_t stream,
+ audio_io_handle_t output) const override;
+ bool streamMute(audio_stream_type_t stream) const override;
+ status_t setMode(audio_mode_t mode) override;
+ status_t setMicMute(bool state) override;
+ bool getMicMute() const override;
+ void setRecordSilenced(audio_port_handle_t portId, bool silenced) override;
+ status_t setParameters(audio_io_handle_t ioHandle,
+ const String8& keyValuePairs) override;
+ String8 getParameters(audio_io_handle_t ioHandle, const String8& keys)
+ const override;
+ void registerClient(const sp<media::IAudioFlingerClient>& client) override;
+ size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
+ audio_channel_mask_t channelMask) const override;
+ status_t openOutput(const media::OpenOutputRequest& request,
+ media::OpenOutputResponse* response) override;
+ audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
+ audio_io_handle_t output2) override;
+ status_t closeOutput(audio_io_handle_t output) override;
+ status_t suspendOutput(audio_io_handle_t output) override;
+ status_t restoreOutput(audio_io_handle_t output) override;
+ status_t openInput(const media::OpenInputRequest& request,
+ media::OpenInputResponse* response) override;
+ status_t closeInput(audio_io_handle_t input) override;
+ status_t invalidateStream(audio_stream_type_t stream) override;
+ status_t setVoiceVolume(float volume) override;
+ status_t getRenderPosition(uint32_t* halFrames, uint32_t* dspFrames,
+ audio_io_handle_t output) const override;
+ uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const override;
+ audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use) override;
+ void acquireAudioSessionId(audio_session_t audioSession, pid_t pid, uid_t uid) override;
+ void releaseAudioSessionId(audio_session_t audioSession, pid_t pid) override;
+ status_t queryNumberEffects(uint32_t* numEffects) const override;
+ status_t queryEffect(uint32_t index, effect_descriptor_t* pDescriptor) const override;
+ status_t getEffectDescriptor(const effect_uuid_t* pEffectUUID,
+ const effect_uuid_t* pTypeUUID,
+ uint32_t preferredTypeFlag,
+ effect_descriptor_t* pDescriptor) const override;
+ status_t createEffect(const media::CreateEffectRequest& request,
+ media::CreateEffectResponse* response) override;
+ status_t moveEffects(audio_session_t session, audio_io_handle_t srcOutput,
+ audio_io_handle_t dstOutput) override;
+ void setEffectSuspended(int effectId,
+ audio_session_t sessionId,
+ bool suspended) override;
+ audio_module_handle_t loadHwModule(const char* name) override;
+ uint32_t getPrimaryOutputSamplingRate() override;
+ size_t getPrimaryOutputFrameCount() override;
+ status_t setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) override;
+ status_t getAudioPort(struct audio_port_v7* port) override;
+ status_t createAudioPatch(const struct audio_patch* patch,
+ audio_patch_handle_t* handle) override;
+ status_t releaseAudioPatch(audio_patch_handle_t handle) override;
+ status_t listAudioPatches(unsigned int* num_patches,
+ struct audio_patch* patches) override;
+ status_t setAudioPortConfig(const struct audio_port_config* config) override;
+ audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId) override;
+ status_t systemReady() override;
+ size_t frameCountHAL(audio_io_handle_t ioHandle) const override;
+ status_t getMicrophones(std::vector<media::MicrophoneInfo>* microphones) override;
+ status_t setAudioHalPids(const std::vector<pid_t>& pids) override;
+
+private:
+ const sp<media::IAudioFlingerService> mDelegate;
};
-// ----------------------------------------------------------------------------
+/**
+ * A server-side adapter, wrapping an IAudioFlinger instance and presenting it as an
+ * IAudioFlingerService. Intended to be used by legacy server code that was written against
+ * IAudioFlinger, before IAudioFlingerService was introduced as an AIDL service.
+ * New servers should not use this adapter, but rather implement IAudioFlingerService directly, via
+ * BnAudioFlingerService.
+ */
+class AudioFlingerServerAdapter : public media::BnAudioFlingerService {
+public:
+ using Status = binder::Status;
+
+ /**
+ * Legacy server should implement this interface in order to be wrapped.
+ */
+ class Delegate : public IAudioFlinger {
+ protected:
+ friend class AudioFlingerServerAdapter;
+
+ enum class TransactionCode {
+ CREATE_TRACK = media::BnAudioFlingerService::TRANSACTION_createTrack,
+ CREATE_RECORD = media::BnAudioFlingerService::TRANSACTION_createRecord,
+ SAMPLE_RATE = media::BnAudioFlingerService::TRANSACTION_sampleRate,
+ FORMAT = media::BnAudioFlingerService::TRANSACTION_format,
+ FRAME_COUNT = media::BnAudioFlingerService::TRANSACTION_frameCount,
+ LATENCY = media::BnAudioFlingerService::TRANSACTION_latency,
+ SET_MASTER_VOLUME = media::BnAudioFlingerService::TRANSACTION_setMasterVolume,
+ SET_MASTER_MUTE = media::BnAudioFlingerService::TRANSACTION_setMasterMute,
+ MASTER_VOLUME = media::BnAudioFlingerService::TRANSACTION_masterVolume,
+ MASTER_MUTE = media::BnAudioFlingerService::TRANSACTION_masterMute,
+ SET_STREAM_VOLUME = media::BnAudioFlingerService::TRANSACTION_setStreamVolume,
+ SET_STREAM_MUTE = media::BnAudioFlingerService::TRANSACTION_setStreamMute,
+ STREAM_VOLUME = media::BnAudioFlingerService::TRANSACTION_streamVolume,
+ STREAM_MUTE = media::BnAudioFlingerService::TRANSACTION_streamMute,
+ SET_MODE = media::BnAudioFlingerService::TRANSACTION_setMode,
+ SET_MIC_MUTE = media::BnAudioFlingerService::TRANSACTION_setMicMute,
+ GET_MIC_MUTE = media::BnAudioFlingerService::TRANSACTION_getMicMute,
+ SET_RECORD_SILENCED = media::BnAudioFlingerService::TRANSACTION_setRecordSilenced,
+ SET_PARAMETERS = media::BnAudioFlingerService::TRANSACTION_setParameters,
+ GET_PARAMETERS = media::BnAudioFlingerService::TRANSACTION_getParameters,
+ REGISTER_CLIENT = media::BnAudioFlingerService::TRANSACTION_registerClient,
+ GET_INPUTBUFFERSIZE = media::BnAudioFlingerService::TRANSACTION_getInputBufferSize,
+ OPEN_OUTPUT = media::BnAudioFlingerService::TRANSACTION_openOutput,
+ OPEN_DUPLICATE_OUTPUT = media::BnAudioFlingerService::TRANSACTION_openDuplicateOutput,
+ CLOSE_OUTPUT = media::BnAudioFlingerService::TRANSACTION_closeOutput,
+ SUSPEND_OUTPUT = media::BnAudioFlingerService::TRANSACTION_suspendOutput,
+ RESTORE_OUTPUT = media::BnAudioFlingerService::TRANSACTION_restoreOutput,
+ OPEN_INPUT = media::BnAudioFlingerService::TRANSACTION_openInput,
+ CLOSE_INPUT = media::BnAudioFlingerService::TRANSACTION_closeInput,
+ INVALIDATE_STREAM = media::BnAudioFlingerService::TRANSACTION_invalidateStream,
+ SET_VOICE_VOLUME = media::BnAudioFlingerService::TRANSACTION_setVoiceVolume,
+ GET_RENDER_POSITION = media::BnAudioFlingerService::TRANSACTION_getRenderPosition,
+ GET_INPUT_FRAMES_LOST = media::BnAudioFlingerService::TRANSACTION_getInputFramesLost,
+ NEW_AUDIO_UNIQUE_ID = media::BnAudioFlingerService::TRANSACTION_newAudioUniqueId,
+ ACQUIRE_AUDIO_SESSION_ID = media::BnAudioFlingerService::TRANSACTION_acquireAudioSessionId,
+ RELEASE_AUDIO_SESSION_ID = media::BnAudioFlingerService::TRANSACTION_releaseAudioSessionId,
+ QUERY_NUM_EFFECTS = media::BnAudioFlingerService::TRANSACTION_queryNumberEffects,
+ QUERY_EFFECT = media::BnAudioFlingerService::TRANSACTION_queryEffect,
+ GET_EFFECT_DESCRIPTOR = media::BnAudioFlingerService::TRANSACTION_getEffectDescriptor,
+ CREATE_EFFECT = media::BnAudioFlingerService::TRANSACTION_createEffect,
+ MOVE_EFFECTS = media::BnAudioFlingerService::TRANSACTION_moveEffects,
+ LOAD_HW_MODULE = media::BnAudioFlingerService::TRANSACTION_loadHwModule,
+ GET_PRIMARY_OUTPUT_SAMPLING_RATE = media::BnAudioFlingerService::TRANSACTION_getPrimaryOutputSamplingRate,
+ GET_PRIMARY_OUTPUT_FRAME_COUNT = media::BnAudioFlingerService::TRANSACTION_getPrimaryOutputFrameCount,
+ SET_LOW_RAM_DEVICE = media::BnAudioFlingerService::TRANSACTION_setLowRamDevice,
+ GET_AUDIO_PORT = media::BnAudioFlingerService::TRANSACTION_getAudioPort,
+ CREATE_AUDIO_PATCH = media::BnAudioFlingerService::TRANSACTION_createAudioPatch,
+ RELEASE_AUDIO_PATCH = media::BnAudioFlingerService::TRANSACTION_releaseAudioPatch,
+ LIST_AUDIO_PATCHES = media::BnAudioFlingerService::TRANSACTION_listAudioPatches,
+ SET_AUDIO_PORT_CONFIG = media::BnAudioFlingerService::TRANSACTION_setAudioPortConfig,
+ GET_AUDIO_HW_SYNC_FOR_SESSION = media::BnAudioFlingerService::TRANSACTION_getAudioHwSyncForSession,
+ SYSTEM_READY = media::BnAudioFlingerService::TRANSACTION_systemReady,
+ FRAME_COUNT_HAL = media::BnAudioFlingerService::TRANSACTION_frameCountHAL,
+ GET_MICROPHONES = media::BnAudioFlingerService::TRANSACTION_getMicrophones,
+ SET_MASTER_BALANCE = media::BnAudioFlingerService::TRANSACTION_setMasterBalance,
+ GET_MASTER_BALANCE = media::BnAudioFlingerService::TRANSACTION_getMasterBalance,
+ SET_EFFECT_SUSPENDED = media::BnAudioFlingerService::TRANSACTION_setEffectSuspended,
+ SET_AUDIO_HAL_PIDS = media::BnAudioFlingerService::TRANSACTION_setAudioHalPids,
+ };
+
+ /**
+ * And optional hook, called on every transaction, before unparceling the data and
+ * dispatching to the respective method. Useful for bulk operations, such as logging or
+ * permission checks.
+ * If an error status is returned, the transaction will return immediately and will not be
+ * processed.
+ */
+ virtual status_t onPreTransact(TransactionCode code, const Parcel& data, uint32_t flags) {
+ (void) code;
+ (void) data;
+ (void) flags;
+ return OK;
+ };
+
+ /**
+ * An optional hook for implementing diagnostics dumping.
+ */
+ virtual status_t dump(int fd, const Vector<String16>& args) {
+ (void) fd;
+ (void) args;
+ return OK;
+ }
+ };
+
+ explicit AudioFlingerServerAdapter(
+ const sp<AudioFlingerServerAdapter::Delegate>& delegate);
+
+ status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) override;
+ status_t dump(int fd, const Vector<String16>& args) override;
+
+ Status createTrack(const media::CreateTrackRequest& request,
+ media::CreateTrackResponse* _aidl_return) override;
+ Status createRecord(const media::CreateRecordRequest& request,
+ media::CreateRecordResponse* _aidl_return) override;
+ Status sampleRate(int32_t ioHandle, int32_t* _aidl_return) override;
+ Status format(int32_t output, media::audio::common::AudioFormat* _aidl_return) override;
+ Status frameCount(int32_t ioHandle, int64_t* _aidl_return) override;
+ Status latency(int32_t output, int32_t* _aidl_return) override;
+ Status setMasterVolume(float value) override;
+ Status setMasterMute(bool muted) override;
+ Status masterVolume(float* _aidl_return) override;
+ Status masterMute(bool* _aidl_return) override;
+ Status setMasterBalance(float balance) override;
+ Status getMasterBalance(float* _aidl_return) override;
+ Status setStreamVolume(media::AudioStreamType stream, float value, int32_t output) override;
+ Status setStreamMute(media::AudioStreamType stream, bool muted) override;
+ Status
+ streamVolume(media::AudioStreamType stream, int32_t output, float* _aidl_return) override;
+ Status streamMute(media::AudioStreamType stream, bool* _aidl_return) override;
+ Status setMode(media::AudioMode mode) override;
+ Status setMicMute(bool state) override;
+ Status getMicMute(bool* _aidl_return) override;
+ Status setRecordSilenced(int32_t portId, bool silenced) override;
+ Status setParameters(int32_t ioHandle, const std::string& keyValuePairs) override;
+ Status
+ getParameters(int32_t ioHandle, const std::string& keys, std::string* _aidl_return) override;
+ Status registerClient(const sp<media::IAudioFlingerClient>& client) override;
+ Status getInputBufferSize(int32_t sampleRate, media::audio::common::AudioFormat format,
+ int32_t channelMask, int64_t* _aidl_return) override;
+ Status openOutput(const media::OpenOutputRequest& request,
+ media::OpenOutputResponse* _aidl_return) override;
+ Status openDuplicateOutput(int32_t output1, int32_t output2, int32_t* _aidl_return) override;
+ Status closeOutput(int32_t output) override;
+ Status suspendOutput(int32_t output) override;
+ Status restoreOutput(int32_t output) override;
+ Status openInput(const media::OpenInputRequest& request,
+ media::OpenInputResponse* _aidl_return) override;
+ Status closeInput(int32_t input) override;
+ Status invalidateStream(media::AudioStreamType stream) override;
+ Status setVoiceVolume(float volume) override;
+ Status getRenderPosition(int32_t output, media::RenderPosition* _aidl_return) override;
+ Status getInputFramesLost(int32_t ioHandle, int32_t* _aidl_return) override;
+ Status newAudioUniqueId(media::AudioUniqueIdUse use, int32_t* _aidl_return) override;
+ Status acquireAudioSessionId(int32_t audioSession, int32_t pid, int32_t uid) override;
+ Status releaseAudioSessionId(int32_t audioSession, int32_t pid) override;
+ Status queryNumberEffects(int32_t* _aidl_return) override;
+ Status queryEffect(int32_t index, media::EffectDescriptor* _aidl_return) override;
+ Status getEffectDescriptor(const media::AudioUuid& effectUUID, const media::AudioUuid& typeUUID,
+ int32_t preferredTypeFlag,
+ media::EffectDescriptor* _aidl_return) override;
+ Status createEffect(const media::CreateEffectRequest& request,
+ media::CreateEffectResponse* _aidl_return) override;
+ Status moveEffects(int32_t session, int32_t srcOutput, int32_t dstOutput) override;
+ Status setEffectSuspended(int32_t effectId, int32_t sessionId, bool suspended) override;
+ Status loadHwModule(const std::string& name, int32_t* _aidl_return) override;
+ Status getPrimaryOutputSamplingRate(int32_t* _aidl_return) override;
+ Status getPrimaryOutputFrameCount(int64_t* _aidl_return) override;
+ Status setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) override;
+ Status getAudioPort(const media::AudioPort& port, media::AudioPort* _aidl_return) override;
+ Status createAudioPatch(const media::AudioPatch& patch, int32_t* _aidl_return) override;
+ Status releaseAudioPatch(int32_t handle) override;
+ Status listAudioPatches(int32_t maxCount,
+ std::vector<media::AudioPatch>* _aidl_return) override;
+ Status setAudioPortConfig(const media::AudioPortConfig& config) override;
+ Status getAudioHwSyncForSession(int32_t sessionId, int32_t* _aidl_return) override;
+ Status systemReady() override;
+ Status frameCountHAL(int32_t ioHandle, int64_t* _aidl_return) override;
+ Status getMicrophones(std::vector<media::MicrophoneInfoData>* _aidl_return) override;
+ Status setAudioHalPids(const std::vector<int32_t>& pids) override;
+
+private:
+ const sp<AudioFlingerServerAdapter::Delegate> mDelegate;
+};
}; // namespace android
diff --git a/media/libaudiofoundation/AudioGain.cpp b/media/libaudiofoundation/AudioGain.cpp
index c59e966..56343d8 100644
--- a/media/libaudiofoundation/AudioGain.cpp
+++ b/media/libaudiofoundation/AudioGain.cpp
@@ -139,7 +139,8 @@
parcelable->index = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mIndex));
parcelable->useInChannelMask = mUseInChannelMask;
parcelable->useForVolume = mUseForVolume;
- parcelable->mode = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_gain_mode_t_int32_t(mGain.mode));
+ parcelable->mode = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_gain_mode_t_mask_int32_t(mGain.mode));
parcelable->channelMask = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_channel_mask_t_int32_t(mGain.channel_mask));
parcelable->minValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.min_value));
@@ -162,7 +163,8 @@
mIndex = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.index));
mUseInChannelMask = parcelable.useInChannelMask;
mUseForVolume = parcelable.useForVolume;
- mGain.mode = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_gain_mode_t(parcelable.mode));
+ mGain.mode = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_int32_t_audio_gain_mode_t_mask(parcelable.mode));
mGain.channel_mask = VALUE_OR_RETURN_STATUS(
aidl2legacy_int32_t_audio_channel_mask_t(parcelable.channelMask));
mGain.min_value = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.minValue));
diff --git a/media/libaudiofoundation/AudioPort.cpp b/media/libaudiofoundation/AudioPort.cpp
index 559c711..6b63675 100644
--- a/media/libaudiofoundation/AudioPort.cpp
+++ b/media/libaudiofoundation/AudioPort.cpp
@@ -291,7 +291,7 @@
parcelable->id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
parcelable->gain.index = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.index));
parcelable->gain.mode = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_gain_mode_t_int32_t(mGain.mode));
+ legacy2aidl_audio_gain_mode_t_mask_int32_t(mGain.mode));
parcelable->gain.channelMask = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_channel_mask_t_int32_t(mGain.channel_mask));
parcelable->gain.rampDurationMs = VALUE_OR_RETURN_STATUS(
@@ -315,7 +315,7 @@
mId = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_port_handle_t(parcelable.id));
mGain.index = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.gain.index));
mGain.mode = VALUE_OR_RETURN_STATUS(
- aidl2legacy_int32_t_audio_gain_mode_t(parcelable.gain.mode));
+ aidl2legacy_int32_t_audio_gain_mode_t_mask(parcelable.gain.mode));
mGain.channel_mask = VALUE_OR_RETURN_STATUS(
aidl2legacy_int32_t_audio_channel_mask_t(parcelable.gain.channelMask));
mGain.ramp_duration_ms = VALUE_OR_RETURN_STATUS(
diff --git a/media/libaudiofoundation/DeviceDescriptorBase.cpp b/media/libaudiofoundation/DeviceDescriptorBase.cpp
index 6261559..a3e9589 100644
--- a/media/libaudiofoundation/DeviceDescriptorBase.cpp
+++ b/media/libaudiofoundation/DeviceDescriptorBase.cpp
@@ -159,41 +159,49 @@
status_t DeviceDescriptorBase::writeToParcel(Parcel *parcel) const
{
- media::DeviceDescriptorBase parcelable;
+ media::AudioPort parcelable;
return writeToParcelable(&parcelable)
?: parcelable.writeToParcel(parcel);
}
-status_t DeviceDescriptorBase::writeToParcelable(media::DeviceDescriptorBase* parcelable) const {
- AudioPort::writeToParcelable(&parcelable->port);
- AudioPortConfig::writeToParcelable(&parcelable->portConfig);
- parcelable->device = VALUE_OR_RETURN_STATUS(
- legacy2aidl_AudioDeviceTypeAddress(mDeviceTypeAddr));
- parcelable->encapsulationModes = VALUE_OR_RETURN_STATUS(
+status_t DeviceDescriptorBase::writeToParcelable(media::AudioPort* parcelable) const {
+ AudioPort::writeToParcelable(parcelable);
+ AudioPortConfig::writeToParcelable(&parcelable->activeConfig);
+ parcelable->id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
+
+ media::AudioPortDeviceExt ext;
+ ext.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(mDeviceTypeAddr));
+ ext.encapsulationModes = VALUE_OR_RETURN_STATUS(
legacy2aidl_AudioEncapsulationMode_mask(mEncapsulationModes));
- parcelable->encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
+ ext.encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
legacy2aidl_AudioEncapsulationMetadataType_mask(mEncapsulationMetadataTypes));
+ UNION_SET(parcelable->ext, device, std::move(ext));
return OK;
}
status_t DeviceDescriptorBase::readFromParcel(const Parcel *parcel) {
- media::DeviceDescriptorBase parcelable;
+ media::AudioPort parcelable;
return parcelable.readFromParcel(parcel)
?: readFromParcelable(parcelable);
}
-status_t DeviceDescriptorBase::readFromParcelable(const media::DeviceDescriptorBase& parcelable) {
- status_t status = AudioPort::readFromParcelable(parcelable.port)
- ?: AudioPortConfig::readFromParcelable(parcelable.portConfig);
+status_t DeviceDescriptorBase::readFromParcelable(const media::AudioPort& parcelable) {
+ if (parcelable.type != media::AudioPortType::DEVICE) {
+ return BAD_VALUE;
+ }
+ status_t status = AudioPort::readFromParcelable(parcelable)
+ ?: AudioPortConfig::readFromParcelable(parcelable.activeConfig);
if (status != OK) {
return status;
}
+
+ media::AudioPortDeviceExt ext = VALUE_OR_RETURN_STATUS(UNION_GET(parcelable.ext, device));
mDeviceTypeAddr = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioDeviceTypeAddress(parcelable.device));
+ aidl2legacy_AudioDeviceTypeAddress(ext.device));
mEncapsulationModes = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioEncapsulationMode_mask(parcelable.encapsulationModes));
+ aidl2legacy_AudioEncapsulationMode_mask(ext.encapsulationModes));
mEncapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioEncapsulationMetadataType_mask(parcelable.encapsulationMetadataTypes));
+ aidl2legacy_AudioEncapsulationMetadataType_mask(ext.encapsulationMetadataTypes));
return OK;
}
@@ -219,7 +227,7 @@
}
ConversionResult<sp<DeviceDescriptorBase>>
-aidl2legacy_DeviceDescriptorBase(const media::DeviceDescriptorBase& aidl) {
+aidl2legacy_DeviceDescriptorBase(const media::AudioPort& aidl) {
sp<DeviceDescriptorBase> result = new DeviceDescriptorBase(AUDIO_DEVICE_NONE);
status_t status = result->readFromParcelable(aidl);
if (status != OK) {
@@ -228,9 +236,9 @@
return result;
}
-ConversionResult<media::DeviceDescriptorBase>
+ConversionResult<media::AudioPort>
legacy2aidl_DeviceDescriptorBase(const sp<DeviceDescriptorBase>& legacy) {
- media::DeviceDescriptorBase aidl;
+ media::AudioPort aidl;
status_t status = legacy->writeToParcelable(&aidl);
if (status != OK) {
return base::unexpected(status);
diff --git a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
index 8a920b7..140ce36 100644
--- a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
+++ b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
@@ -18,7 +18,7 @@
#include <vector>
-#include <android/media/DeviceDescriptorBase.h>
+#include <android/media/AudioPort.h>
#include <binder/Parcel.h>
#include <binder/Parcelable.h>
#include <media/AudioContainers.h>
@@ -77,8 +77,8 @@
status_t writeToParcel(Parcel* parcel) const override;
status_t readFromParcel(const Parcel* parcel) override;
- status_t writeToParcelable(media::DeviceDescriptorBase* parcelable) const;
- status_t readFromParcelable(const media::DeviceDescriptorBase& parcelable);
+ status_t writeToParcelable(media::AudioPort* parcelable) const;
+ status_t readFromParcelable(const media::AudioPort& parcelable);
protected:
AudioDeviceTypeAddr mDeviceTypeAddr;
@@ -113,8 +113,8 @@
// Conversion routines, according to AidlConversion.h conventions.
ConversionResult<sp<DeviceDescriptorBase>>
-aidl2legacy_DeviceDescriptorBase(const media::DeviceDescriptorBase& aidl);
-ConversionResult<media::DeviceDescriptorBase>
+aidl2legacy_DeviceDescriptorBase(const media::AudioPort& aidl);
+ConversionResult<media::AudioPort>
legacy2aidl_DeviceDescriptorBase(const sp<DeviceDescriptorBase>& legacy);
} // namespace android
diff --git a/media/libeffects/preprocessing/.clang-format b/media/libeffects/preprocessing/.clang-format
new file mode 120000
index 0000000..f1b4f69
--- /dev/null
+++ b/media/libeffects/preprocessing/.clang-format
@@ -0,0 +1 @@
+../../../../../build/soong/scripts/system-clang-format
\ No newline at end of file
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index d8840b2..1a5547b 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -18,17 +18,17 @@
#include <string.h>
#define LOG_TAG "PreProcessing"
//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-#include <utils/Timers.h>
-#include <hardware/audio_effect.h>
#include <audio_effects/effect_aec.h>
#include <audio_effects/effect_agc.h>
+#include <hardware/audio_effect.h>
+#include <utils/Log.h>
+#include <utils/Timers.h>
#ifndef WEBRTC_LEGACY
#include <audio_effects/effect_agc2.h>
#endif
#include <audio_effects/effect_ns.h>
-#include <module_common_types.h>
#include <audio_processing.h>
+#include <module_common_types.h>
#ifdef WEBRTC_LEGACY
#include "speex/speex_resampler.h"
#endif
@@ -44,29 +44,28 @@
#define PREPROC_NUM_SESSIONS 8
// types of pre processing modules
-enum preproc_id
-{
- PREPROC_AGC, // Automatic Gain Control
+enum preproc_id {
+ PREPROC_AGC, // Automatic Gain Control
#ifndef WEBRTC_LEGACY
- PREPROC_AGC2, // Automatic Gain Control 2
+ PREPROC_AGC2, // Automatic Gain Control 2
#endif
- PREPROC_AEC, // Acoustic Echo Canceler
- PREPROC_NS, // Noise Suppressor
+ PREPROC_AEC, // Acoustic Echo Canceler
+ PREPROC_NS, // Noise Suppressor
PREPROC_NUM_EFFECTS
};
// Session state
enum preproc_session_state {
- PREPROC_SESSION_STATE_INIT, // initialized
- PREPROC_SESSION_STATE_CONFIG // configuration received
+ PREPROC_SESSION_STATE_INIT, // initialized
+ PREPROC_SESSION_STATE_CONFIG // configuration received
};
// Effect/Preprocessor state
enum preproc_effect_state {
- PREPROC_EFFECT_STATE_INIT, // initialized
- PREPROC_EFFECT_STATE_CREATED, // webRTC engine created
- PREPROC_EFFECT_STATE_CONFIG, // configuration received/disabled
- PREPROC_EFFECT_STATE_ACTIVE // active/enabled
+ PREPROC_EFFECT_STATE_INIT, // initialized
+ PREPROC_EFFECT_STATE_CREATED, // webRTC engine created
+ PREPROC_EFFECT_STATE_CONFIG, // configuration received/disabled
+ PREPROC_EFFECT_STATE_ACTIVE // active/enabled
};
// handle on webRTC engine
@@ -79,95 +78,95 @@
// Effect operation table. Functions for all pre processors are declared in sPreProcOps[] table.
// Function pointer can be null if no action required.
struct preproc_ops_s {
- int (* create)(preproc_effect_t *fx);
- int (* init)(preproc_effect_t *fx);
- int (* reset)(preproc_effect_t *fx);
- void (* enable)(preproc_effect_t *fx);
- void (* disable)(preproc_effect_t *fx);
- int (* set_parameter)(preproc_effect_t *fx, void *param, void *value);
- int (* get_parameter)(preproc_effect_t *fx, void *param, uint32_t *size, void *value);
- int (* set_device)(preproc_effect_t *fx, uint32_t device);
+ int (*create)(preproc_effect_t* fx);
+ int (*init)(preproc_effect_t* fx);
+ int (*reset)(preproc_effect_t* fx);
+ void (*enable)(preproc_effect_t* fx);
+ void (*disable)(preproc_effect_t* fx);
+ int (*set_parameter)(preproc_effect_t* fx, void* param, void* value);
+ int (*get_parameter)(preproc_effect_t* fx, void* param, uint32_t* size, void* value);
+ int (*set_device)(preproc_effect_t* fx, uint32_t device);
};
// Effect context
struct preproc_effect_s {
- const struct effect_interface_s *itfe;
- uint32_t procId; // type of pre processor (enum preproc_id)
- uint32_t state; // current state (enum preproc_effect_state)
- preproc_session_t *session; // session the effect is on
- const preproc_ops_t *ops; // effect ops table
- preproc_fx_handle_t engine; // handle on webRTC engine
- uint32_t type; // subtype of effect
+ const struct effect_interface_s* itfe;
+ uint32_t procId; // type of pre processor (enum preproc_id)
+ uint32_t state; // current state (enum preproc_effect_state)
+ preproc_session_t* session; // session the effect is on
+ const preproc_ops_t* ops; // effect ops table
+ preproc_fx_handle_t engine; // handle on webRTC engine
+ uint32_t type; // subtype of effect
#ifdef DUAL_MIC_TEST
- bool aux_channels_on; // support auxiliary channels
- size_t cur_channel_config; // current auciliary channel configuration
+ bool aux_channels_on; // support auxiliary channels
+ size_t cur_channel_config; // current auciliary channel configuration
#endif
};
// Session context
struct preproc_session_s {
- struct preproc_effect_s effects[PREPROC_NUM_EFFECTS]; // effects in this session
- uint32_t state; // current state (enum preproc_session_state)
- int id; // audio session ID
- int io; // handle of input stream this session is on
- webrtc::AudioProcessing* apm; // handle on webRTC audio processing module (APM)
+ struct preproc_effect_s effects[PREPROC_NUM_EFFECTS]; // effects in this session
+ uint32_t state; // current state (enum preproc_session_state)
+ int id; // audio session ID
+ int io; // handle of input stream this session is on
+ webrtc::AudioProcessing* apm; // handle on webRTC audio processing module (APM)
#ifndef WEBRTC_LEGACY
// Audio Processing module builder
webrtc::AudioProcessingBuilder ap_builder;
#endif
- size_t apmFrameCount; // buffer size for webRTC process (10 ms)
- uint32_t apmSamplingRate; // webRTC APM sampling rate (8/16 or 32 kHz)
- size_t frameCount; // buffer size before input resampler ( <=> apmFrameCount)
- uint32_t samplingRate; // sampling rate at effect process interface
- uint32_t inChannelCount; // input channel count
- uint32_t outChannelCount; // output channel count
- uint32_t createdMsk; // bit field containing IDs of crested pre processors
- uint32_t enabledMsk; // bit field containing IDs of enabled pre processors
- uint32_t processedMsk; // bit field containing IDs of pre processors already
- // processed in current round
+ size_t apmFrameCount; // buffer size for webRTC process (10 ms)
+ uint32_t apmSamplingRate; // webRTC APM sampling rate (8/16 or 32 kHz)
+ size_t frameCount; // buffer size before input resampler ( <=> apmFrameCount)
+ uint32_t samplingRate; // sampling rate at effect process interface
+ uint32_t inChannelCount; // input channel count
+ uint32_t outChannelCount; // output channel count
+ uint32_t createdMsk; // bit field containing IDs of crested pre processors
+ uint32_t enabledMsk; // bit field containing IDs of enabled pre processors
+ uint32_t processedMsk; // bit field containing IDs of pre processors already
+ // processed in current round
#ifdef WEBRTC_LEGACY
- webrtc::AudioFrame *procFrame; // audio frame passed to webRTC AMP ProcessStream()
+ webrtc::AudioFrame* procFrame; // audio frame passed to webRTC AMP ProcessStream()
#else
// audio config strucutre
webrtc::AudioProcessing::Config config;
webrtc::StreamConfig inputConfig; // input stream configuration
webrtc::StreamConfig outputConfig; // output stream configuration
#endif
- int16_t *inBuf; // input buffer used when resampling
- size_t inBufSize; // input buffer size in frames
- size_t framesIn; // number of frames in input buffer
+ int16_t* inBuf; // input buffer used when resampling
+ size_t inBufSize; // input buffer size in frames
+ size_t framesIn; // number of frames in input buffer
#ifdef WEBRTC_LEGACY
- SpeexResamplerState *inResampler; // handle on input speex resampler
+ SpeexResamplerState* inResampler; // handle on input speex resampler
#endif
- int16_t *outBuf; // output buffer used when resampling
- size_t outBufSize; // output buffer size in frames
- size_t framesOut; // number of frames in output buffer
+ int16_t* outBuf; // output buffer used when resampling
+ size_t outBufSize; // output buffer size in frames
+ size_t framesOut; // number of frames in output buffer
#ifdef WEBRTC_LEGACY
- SpeexResamplerState *outResampler; // handle on output speex resampler
+ SpeexResamplerState* outResampler; // handle on output speex resampler
#endif
- uint32_t revChannelCount; // number of channels on reverse stream
- uint32_t revEnabledMsk; // bit field containing IDs of enabled pre processors
- // with reverse channel
- uint32_t revProcessedMsk; // bit field containing IDs of pre processors with reverse
- // channel already processed in current round
+ uint32_t revChannelCount; // number of channels on reverse stream
+ uint32_t revEnabledMsk; // bit field containing IDs of enabled pre processors
+ // with reverse channel
+ uint32_t revProcessedMsk; // bit field containing IDs of pre processors with reverse
+ // channel already processed in current round
#ifdef WEBRTC_LEGACY
- webrtc::AudioFrame *revFrame; // audio frame passed to webRTC AMP AnalyzeReverseStream()
+ webrtc::AudioFrame* revFrame; // audio frame passed to webRTC AMP AnalyzeReverseStream()
#else
webrtc::StreamConfig revConfig; // reverse stream configuration.
#endif
- int16_t *revBuf; // reverse channel input buffer
- size_t revBufSize; // reverse channel input buffer size
- size_t framesRev; // number of frames in reverse channel input buffer
+ int16_t* revBuf; // reverse channel input buffer
+ size_t revBufSize; // reverse channel input buffer size
+ size_t framesRev; // number of frames in reverse channel input buffer
#ifdef WEBRTC_LEGACY
- SpeexResamplerState *revResampler; // handle on reverse channel input speex resampler
+ SpeexResamplerState* revResampler; // handle on reverse channel input speex resampler
#endif
};
#ifdef DUAL_MIC_TEST
enum {
- PREPROC_CMD_DUAL_MIC_ENABLE = EFFECT_CMD_FIRST_PROPRIETARY, // enable dual mic mode
- PREPROC_CMD_DUAL_MIC_PCM_DUMP_START, // start pcm capture
- PREPROC_CMD_DUAL_MIC_PCM_DUMP_STOP // stop pcm capture
+ PREPROC_CMD_DUAL_MIC_ENABLE = EFFECT_CMD_FIRST_PROPRIETARY, // enable dual mic mode
+ PREPROC_CMD_DUAL_MIC_PCM_DUMP_START, // start pcm capture
+ PREPROC_CMD_DUAL_MIC_PCM_DUMP_STOP // stop pcm capture
};
enum {
@@ -180,24 +179,22 @@
};
const channel_config_t sDualMicConfigs[CHANNEL_CFG_CNT] = {
- {AUDIO_CHANNEL_IN_MONO , 0},
- {AUDIO_CHANNEL_IN_STEREO , 0},
- {AUDIO_CHANNEL_IN_FRONT , AUDIO_CHANNEL_IN_BACK},
- {AUDIO_CHANNEL_IN_STEREO , AUDIO_CHANNEL_IN_RIGHT}
-};
+ {AUDIO_CHANNEL_IN_MONO, 0},
+ {AUDIO_CHANNEL_IN_STEREO, 0},
+ {AUDIO_CHANNEL_IN_FRONT, AUDIO_CHANNEL_IN_BACK},
+ {AUDIO_CHANNEL_IN_STEREO, AUDIO_CHANNEL_IN_RIGHT}};
bool sHasAuxChannels[PREPROC_NUM_EFFECTS] = {
- false, // PREPROC_AGC
+ false, // PREPROC_AGC
true, // PREPROC_AEC
true, // PREPROC_NS
};
bool gDualMicEnabled;
-FILE *gPcmDumpFh;
+FILE* gPcmDumpFh;
static pthread_mutex_t gPcmDumpLock = PTHREAD_MUTEX_INITIALIZER;
#endif
-
//------------------------------------------------------------------------------
// Effect descriptors
//------------------------------------------------------------------------------
@@ -207,88 +204,75 @@
// Automatic Gain Control
static const effect_descriptor_t sAgcDescriptor = {
- { 0x0a8abfe0, 0x654c, 0x11e0, 0xba26, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }, // type
- { 0xaa8130e0, 0x66fc, 0x11e0, 0xbad0, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }, // uuid
+ {0x0a8abfe0, 0x654c, 0x11e0, 0xba26, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}, // type
+ {0xaa8130e0, 0x66fc, 0x11e0, 0xbad0, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}, // uuid
EFFECT_CONTROL_API_VERSION,
- (EFFECT_FLAG_TYPE_PRE_PROC|EFFECT_FLAG_DEVICE_IND),
- 0, //FIXME indicate CPU load
- 0, //FIXME indicate memory usage
+ (EFFECT_FLAG_TYPE_PRE_PROC | EFFECT_FLAG_DEVICE_IND),
+ 0, // FIXME indicate CPU load
+ 0, // FIXME indicate memory usage
"Automatic Gain Control",
- "The Android Open Source Project"
-};
+ "The Android Open Source Project"};
#ifndef WEBRTC_LEGACY
// Automatic Gain Control 2
static const effect_descriptor_t sAgc2Descriptor = {
- { 0xae3c653b, 0xbe18, 0x4ab8, 0x8938, { 0x41, 0x8f, 0x0a, 0x7f, 0x06, 0xac } }, // type
- { 0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, { 0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86 } }, // uuid
+ {0xae3c653b, 0xbe18, 0x4ab8, 0x8938, {0x41, 0x8f, 0x0a, 0x7f, 0x06, 0xac}}, // type
+ {0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, {0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86}}, // uuid
EFFECT_CONTROL_API_VERSION,
- (EFFECT_FLAG_TYPE_PRE_PROC|EFFECT_FLAG_DEVICE_IND),
- 0, //FIXME indicate CPU load
- 0, //FIXME indicate memory usage
+ (EFFECT_FLAG_TYPE_PRE_PROC | EFFECT_FLAG_DEVICE_IND),
+ 0, // FIXME indicate CPU load
+ 0, // FIXME indicate memory usage
"Automatic Gain Control 2",
- "The Android Open Source Project"
-};
+ "The Android Open Source Project"};
#endif
// Acoustic Echo Cancellation
static const effect_descriptor_t sAecDescriptor = {
- { 0x7b491460, 0x8d4d, 0x11e0, 0xbd61, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }, // type
- { 0xbb392ec0, 0x8d4d, 0x11e0, 0xa896, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }, // uuid
+ {0x7b491460, 0x8d4d, 0x11e0, 0xbd61, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}, // type
+ {0xbb392ec0, 0x8d4d, 0x11e0, 0xa896, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}, // uuid
EFFECT_CONTROL_API_VERSION,
- (EFFECT_FLAG_TYPE_PRE_PROC|EFFECT_FLAG_DEVICE_IND),
- 0, //FIXME indicate CPU load
- 0, //FIXME indicate memory usage
+ (EFFECT_FLAG_TYPE_PRE_PROC | EFFECT_FLAG_DEVICE_IND),
+ 0, // FIXME indicate CPU load
+ 0, // FIXME indicate memory usage
"Acoustic Echo Canceler",
- "The Android Open Source Project"
-};
+ "The Android Open Source Project"};
// Noise suppression
static const effect_descriptor_t sNsDescriptor = {
- { 0x58b4b260, 0x8e06, 0x11e0, 0xaa8e, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }, // type
- { 0xc06c8400, 0x8e06, 0x11e0, 0x9cb6, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }, // uuid
+ {0x58b4b260, 0x8e06, 0x11e0, 0xaa8e, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}, // type
+ {0xc06c8400, 0x8e06, 0x11e0, 0x9cb6, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}, // uuid
EFFECT_CONTROL_API_VERSION,
- (EFFECT_FLAG_TYPE_PRE_PROC|EFFECT_FLAG_DEVICE_IND),
- 0, //FIXME indicate CPU load
- 0, //FIXME indicate memory usage
+ (EFFECT_FLAG_TYPE_PRE_PROC | EFFECT_FLAG_DEVICE_IND),
+ 0, // FIXME indicate CPU load
+ 0, // FIXME indicate memory usage
"Noise Suppression",
- "The Android Open Source Project"
-};
+ "The Android Open Source Project"};
-
-static const effect_descriptor_t *sDescriptors[PREPROC_NUM_EFFECTS] = {
- &sAgcDescriptor,
+static const effect_descriptor_t* sDescriptors[PREPROC_NUM_EFFECTS] = {&sAgcDescriptor,
#ifndef WEBRTC_LEGACY
- &sAgc2Descriptor,
+ &sAgc2Descriptor,
#endif
- &sAecDescriptor,
- &sNsDescriptor
-};
+ &sAecDescriptor,
+ &sNsDescriptor};
//------------------------------------------------------------------------------
// Helper functions
//------------------------------------------------------------------------------
-const effect_uuid_t * const sUuidToPreProcTable[PREPROC_NUM_EFFECTS] = {
- FX_IID_AGC,
+const effect_uuid_t* const sUuidToPreProcTable[PREPROC_NUM_EFFECTS] = {FX_IID_AGC,
#ifndef WEBRTC_LEGACY
- FX_IID_AGC2,
+ FX_IID_AGC2,
#endif
- FX_IID_AEC,
- FX_IID_NS
-};
+ FX_IID_AEC, FX_IID_NS};
-
-const effect_uuid_t * ProcIdToUuid(int procId)
-{
+const effect_uuid_t* ProcIdToUuid(int procId) {
if (procId >= PREPROC_NUM_EFFECTS) {
return EFFECT_UUID_NULL;
}
return sUuidToPreProcTable[procId];
}
-uint32_t UuidToProcId(const effect_uuid_t * uuid)
-{
+uint32_t UuidToProcId(const effect_uuid_t* uuid) {
size_t i;
for (i = 0; i < PREPROC_NUM_EFFECTS; i++) {
if (memcmp(uuid, sUuidToPreProcTable[i], sizeof(*uuid)) == 0) {
@@ -298,15 +282,13 @@
return i;
}
-bool HasReverseStream(uint32_t procId)
-{
+bool HasReverseStream(uint32_t procId) {
if (procId == PREPROC_AEC) {
return true;
}
return false;
}
-
//------------------------------------------------------------------------------
// Automatic Gain Control (AGC)
//------------------------------------------------------------------------------
@@ -316,24 +298,22 @@
static const bool kAgcDefaultLimiter = true;
#ifndef WEBRTC_LEGACY
-int Agc2Init (preproc_effect_t *effect)
-{
+int Agc2Init(preproc_effect_t* effect) {
ALOGV("Agc2Init");
effect->session->config = effect->session->apm->GetConfig();
effect->session->config.gain_controller2.fixed_digital.gain_db = 0.f;
effect->session->config.gain_controller2.adaptive_digital.level_estimator =
- effect->session->config.gain_controller2.kRms;
+ effect->session->config.gain_controller2.kRms;
effect->session->config.gain_controller2.adaptive_digital.extra_saturation_margin_db = 2.f;
effect->session->apm->ApplyConfig(effect->session->config);
return 0;
}
#endif
-int AgcInit (preproc_effect_t *effect)
-{
+int AgcInit(preproc_effect_t* effect) {
ALOGV("AgcInit");
#ifdef WEBRTC_LEGACY
- webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
+ webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
agc->set_mode(webrtc::GainControl::kFixedDigital);
agc->set_target_level_dbfs(kAgcDefaultTargetLevel);
agc->set_compression_gain_db(kAgcDefaultCompGain);
@@ -349,17 +329,15 @@
}
#ifndef WEBRTC_LEGACY
-int Agc2Create(preproc_effect_t *effect)
-{
+int Agc2Create(preproc_effect_t* effect) {
Agc2Init(effect);
return 0;
}
#endif
-int AgcCreate(preproc_effect_t *effect)
-{
+int AgcCreate(preproc_effect_t* effect) {
#ifdef WEBRTC_LEGACY
- webrtc::GainControl *agc = effect->session->apm->gain_control();
+ webrtc::GainControl* agc = effect->session->apm->gain_control();
ALOGV("AgcCreate got agc %p", agc);
if (agc == NULL) {
ALOGW("AgcCreate Error");
@@ -372,230 +350,216 @@
}
#ifndef WEBRTC_LEGACY
-int Agc2GetParameter(preproc_effect_t *effect,
- void *pParam,
- uint32_t *pValueSize,
- void *pValue)
-{
+int Agc2GetParameter(preproc_effect_t* effect, void* pParam, uint32_t* pValueSize, void* pValue) {
int status = 0;
- uint32_t param = *(uint32_t *)pParam;
- agc2_settings_t *pProperties = (agc2_settings_t *)pValue;
+ uint32_t param = *(uint32_t*)pParam;
+ agc2_settings_t* pProperties = (agc2_settings_t*)pValue;
switch (param) {
- case AGC2_PARAM_FIXED_DIGITAL_GAIN:
- if (*pValueSize < sizeof(float)) {
- *pValueSize = 0.f;
- return -EINVAL;
- }
- break;
- case AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR:
- if (*pValueSize < sizeof(int32_t)) {
- *pValueSize = 0;
- return -EINVAL;
- }
- break;
- case AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN:
- if (*pValueSize < sizeof(float)) {
- *pValueSize = 0.f;
- return -EINVAL;
- }
- break;
- case AGC2_PARAM_PROPERTIES:
- if (*pValueSize < sizeof(agc2_settings_t)) {
- *pValueSize = 0;
- return -EINVAL;
- }
- break;
+ case AGC2_PARAM_FIXED_DIGITAL_GAIN:
+ if (*pValueSize < sizeof(float)) {
+ *pValueSize = 0.f;
+ return -EINVAL;
+ }
+ break;
+ case AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR:
+ if (*pValueSize < sizeof(int32_t)) {
+ *pValueSize = 0;
+ return -EINVAL;
+ }
+ break;
+ case AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN:
+ if (*pValueSize < sizeof(float)) {
+ *pValueSize = 0.f;
+ return -EINVAL;
+ }
+ break;
+ case AGC2_PARAM_PROPERTIES:
+ if (*pValueSize < sizeof(agc2_settings_t)) {
+ *pValueSize = 0;
+ return -EINVAL;
+ }
+ break;
- default:
- ALOGW("Agc2GetParameter() unknown param %08x", param);
- status = -EINVAL;
- break;
+ default:
+ ALOGW("Agc2GetParameter() unknown param %08x", param);
+ status = -EINVAL;
+ break;
}
effect->session->config = effect->session->apm->GetConfig();
switch (param) {
- case AGC2_PARAM_FIXED_DIGITAL_GAIN:
- *(float *) pValue =
- (float)(effect->session->config.gain_controller2.fixed_digital.gain_db);
- ALOGV("Agc2GetParameter() target level %f dB", *(float *) pValue);
- break;
- case AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR:
- *(uint32_t *) pValue =
- (uint32_t)(effect->session->config.gain_controller2.adaptive_digital.
- level_estimator);
- ALOGV("Agc2GetParameter() level estimator %d",
- *(webrtc::AudioProcessing::Config::GainController2::LevelEstimator *) pValue);
- break;
- case AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN:
- *(float *) pValue =
- (float)(effect->session->config.gain_controller2.adaptive_digital.
- extra_saturation_margin_db);
- ALOGV("Agc2GetParameter() extra saturation margin %f dB", *(float *) pValue);
- break;
- case AGC2_PARAM_PROPERTIES:
- pProperties->fixedDigitalGain =
- (float)(effect->session->config.gain_controller2.fixed_digital.gain_db);
- pProperties->level_estimator =
- (uint32_t)(effect->session->config.gain_controller2.adaptive_digital.
- level_estimator);
- pProperties->extraSaturationMargin =
- (float)(effect->session->config.gain_controller2.adaptive_digital.
- extra_saturation_margin_db);
- break;
- default:
- ALOGW("Agc2GetParameter() unknown param %d", param);
- status = -EINVAL;
- break;
+ case AGC2_PARAM_FIXED_DIGITAL_GAIN:
+ *(float*)pValue =
+ (float)(effect->session->config.gain_controller2.fixed_digital.gain_db);
+ ALOGV("Agc2GetParameter() target level %f dB", *(float*)pValue);
+ break;
+ case AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR:
+ *(uint32_t*)pValue = (uint32_t)(
+ effect->session->config.gain_controller2.adaptive_digital.level_estimator);
+ ALOGV("Agc2GetParameter() level estimator %d",
+ *(webrtc::AudioProcessing::Config::GainController2::LevelEstimator*)pValue);
+ break;
+ case AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN:
+ *(float*)pValue = (float)(effect->session->config.gain_controller2.adaptive_digital
+ .extra_saturation_margin_db);
+ ALOGV("Agc2GetParameter() extra saturation margin %f dB", *(float*)pValue);
+ break;
+ case AGC2_PARAM_PROPERTIES:
+ pProperties->fixedDigitalGain =
+ (float)(effect->session->config.gain_controller2.fixed_digital.gain_db);
+ pProperties->level_estimator = (uint32_t)(
+ effect->session->config.gain_controller2.adaptive_digital.level_estimator);
+ pProperties->extraSaturationMargin =
+ (float)(effect->session->config.gain_controller2.adaptive_digital
+ .extra_saturation_margin_db);
+ break;
+ default:
+ ALOGW("Agc2GetParameter() unknown param %d", param);
+ status = -EINVAL;
+ break;
}
return status;
}
#endif
-int AgcGetParameter(preproc_effect_t *effect,
- void *pParam,
- uint32_t *pValueSize,
- void *pValue)
-{
+int AgcGetParameter(preproc_effect_t* effect, void* pParam, uint32_t* pValueSize, void* pValue) {
int status = 0;
- uint32_t param = *(uint32_t *)pParam;
- t_agc_settings *pProperties = (t_agc_settings *)pValue;
+ uint32_t param = *(uint32_t*)pParam;
+ t_agc_settings* pProperties = (t_agc_settings*)pValue;
#ifdef WEBRTC_LEGACY
- webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
+ webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
#endif
switch (param) {
- case AGC_PARAM_TARGET_LEVEL:
- case AGC_PARAM_COMP_GAIN:
- if (*pValueSize < sizeof(int16_t)) {
- *pValueSize = 0;
- return -EINVAL;
- }
- break;
- case AGC_PARAM_LIMITER_ENA:
- if (*pValueSize < sizeof(bool)) {
- *pValueSize = 0;
- return -EINVAL;
- }
- break;
- case AGC_PARAM_PROPERTIES:
- if (*pValueSize < sizeof(t_agc_settings)) {
- *pValueSize = 0;
- return -EINVAL;
- }
- break;
+ case AGC_PARAM_TARGET_LEVEL:
+ case AGC_PARAM_COMP_GAIN:
+ if (*pValueSize < sizeof(int16_t)) {
+ *pValueSize = 0;
+ return -EINVAL;
+ }
+ break;
+ case AGC_PARAM_LIMITER_ENA:
+ if (*pValueSize < sizeof(bool)) {
+ *pValueSize = 0;
+ return -EINVAL;
+ }
+ break;
+ case AGC_PARAM_PROPERTIES:
+ if (*pValueSize < sizeof(t_agc_settings)) {
+ *pValueSize = 0;
+ return -EINVAL;
+ }
+ break;
- default:
- ALOGW("AgcGetParameter() unknown param %08x", param);
- status = -EINVAL;
- break;
+ default:
+ ALOGW("AgcGetParameter() unknown param %08x", param);
+ status = -EINVAL;
+ break;
}
#ifdef WEBRTC_LEGACY
switch (param) {
- case AGC_PARAM_TARGET_LEVEL:
- *(int16_t *) pValue = (int16_t)(agc->target_level_dbfs() * -100);
- ALOGV("AgcGetParameter() target level %d milliBels", *(int16_t *) pValue);
- break;
- case AGC_PARAM_COMP_GAIN:
- *(int16_t *) pValue = (int16_t)(agc->compression_gain_db() * 100);
- ALOGV("AgcGetParameter() comp gain %d milliBels", *(int16_t *) pValue);
- break;
- case AGC_PARAM_LIMITER_ENA:
- *(bool *) pValue = (bool)agc->is_limiter_enabled();
- ALOGV("AgcGetParameter() limiter enabled %s",
- (*(int16_t *) pValue != 0) ? "true" : "false");
- break;
- case AGC_PARAM_PROPERTIES:
- pProperties->targetLevel = (int16_t)(agc->target_level_dbfs() * -100);
- pProperties->compGain = (int16_t)(agc->compression_gain_db() * 100);
- pProperties->limiterEnabled = (bool)agc->is_limiter_enabled();
- break;
- default:
- ALOGW("AgcGetParameter() unknown param %d", param);
- status = -EINVAL;
- break;
+ case AGC_PARAM_TARGET_LEVEL:
+ *(int16_t*)pValue = (int16_t)(agc->target_level_dbfs() * -100);
+ ALOGV("AgcGetParameter() target level %d milliBels", *(int16_t*)pValue);
+ break;
+ case AGC_PARAM_COMP_GAIN:
+ *(int16_t*)pValue = (int16_t)(agc->compression_gain_db() * 100);
+ ALOGV("AgcGetParameter() comp gain %d milliBels", *(int16_t*)pValue);
+ break;
+ case AGC_PARAM_LIMITER_ENA:
+ *(bool*)pValue = (bool)agc->is_limiter_enabled();
+ ALOGV("AgcGetParameter() limiter enabled %s",
+ (*(int16_t*)pValue != 0) ? "true" : "false");
+ break;
+ case AGC_PARAM_PROPERTIES:
+ pProperties->targetLevel = (int16_t)(agc->target_level_dbfs() * -100);
+ pProperties->compGain = (int16_t)(agc->compression_gain_db() * 100);
+ pProperties->limiterEnabled = (bool)agc->is_limiter_enabled();
+ break;
+ default:
+ ALOGW("AgcGetParameter() unknown param %d", param);
+ status = -EINVAL;
+ break;
}
#else
effect->session->config = effect->session->apm->GetConfig();
switch (param) {
- case AGC_PARAM_TARGET_LEVEL:
- *(int16_t *) pValue =
- (int16_t)(effect->session->config.gain_controller1.target_level_dbfs * -100);
- ALOGV("AgcGetParameter() target level %d milliBels", *(int16_t *) pValue);
- break;
- case AGC_PARAM_COMP_GAIN:
- *(int16_t *) pValue =
- (int16_t)(effect->session->config.gain_controller1.compression_gain_db * -100);
- ALOGV("AgcGetParameter() comp gain %d milliBels", *(int16_t *) pValue);
- break;
- case AGC_PARAM_LIMITER_ENA:
- *(bool *) pValue =
- (bool)(effect->session->config.gain_controller1.enable_limiter);
- ALOGV("AgcGetParameter() limiter enabled %s",
- (*(int16_t *) pValue != 0) ? "true" : "false");
- break;
- case AGC_PARAM_PROPERTIES:
- pProperties->targetLevel =
- (int16_t)(effect->session->config.gain_controller1.target_level_dbfs * -100);
- pProperties->compGain =
- (int16_t)(effect->session->config.gain_controller1.compression_gain_db * -100);
- pProperties->limiterEnabled =
- (bool)(effect->session->config.gain_controller1.enable_limiter);
- break;
- default:
- ALOGW("AgcGetParameter() unknown param %d", param);
- status = -EINVAL;
- break;
+ case AGC_PARAM_TARGET_LEVEL:
+ *(int16_t*)pValue =
+ (int16_t)(effect->session->config.gain_controller1.target_level_dbfs * -100);
+ ALOGV("AgcGetParameter() target level %d milliBels", *(int16_t*)pValue);
+ break;
+ case AGC_PARAM_COMP_GAIN:
+ *(int16_t*)pValue =
+ (int16_t)(effect->session->config.gain_controller1.compression_gain_db * -100);
+ ALOGV("AgcGetParameter() comp gain %d milliBels", *(int16_t*)pValue);
+ break;
+ case AGC_PARAM_LIMITER_ENA:
+ *(bool*)pValue = (bool)(effect->session->config.gain_controller1.enable_limiter);
+ ALOGV("AgcGetParameter() limiter enabled %s",
+ (*(int16_t*)pValue != 0) ? "true" : "false");
+ break;
+ case AGC_PARAM_PROPERTIES:
+ pProperties->targetLevel =
+ (int16_t)(effect->session->config.gain_controller1.target_level_dbfs * -100);
+ pProperties->compGain =
+ (int16_t)(effect->session->config.gain_controller1.compression_gain_db * -100);
+ pProperties->limiterEnabled =
+ (bool)(effect->session->config.gain_controller1.enable_limiter);
+ break;
+ default:
+ ALOGW("AgcGetParameter() unknown param %d", param);
+ status = -EINVAL;
+ break;
}
#endif
return status;
}
#ifndef WEBRTC_LEGACY
-int Agc2SetParameter (preproc_effect_t *effect, void *pParam, void *pValue)
-{
+int Agc2SetParameter(preproc_effect_t* effect, void* pParam, void* pValue) {
int status = 0;
- uint32_t param = *(uint32_t *)pParam;
+ uint32_t param = *(uint32_t*)pParam;
float valueFloat = 0.f;
- agc2_settings_t *pProperties = (agc2_settings_t *)pValue;
+ agc2_settings_t* pProperties = (agc2_settings_t*)pValue;
effect->session->config = effect->session->apm->GetConfig();
switch (param) {
- case AGC2_PARAM_FIXED_DIGITAL_GAIN:
- valueFloat = (float)(*(int32_t *) pValue);
- ALOGV("Agc2SetParameter() fixed digital gain %f dB", valueFloat);
- effect->session->config.gain_controller2.fixed_digital.gain_db = valueFloat;
- break;
- case AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR:
- ALOGV("Agc2SetParameter() level estimator %d", *(webrtc::AudioProcessing::Config::
- GainController2::LevelEstimator *) pValue);
- effect->session->config.gain_controller2.adaptive_digital.level_estimator =
- (*(webrtc::AudioProcessing::Config::GainController2::LevelEstimator *) pValue);
- break;
- case AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN:
- valueFloat = (float)(*(int32_t *) pValue);
- ALOGV("Agc2SetParameter() extra saturation margin %f dB", valueFloat);
- effect->session->config.gain_controller2.adaptive_digital.extra_saturation_margin_db =
- valueFloat;
- break;
- case AGC2_PARAM_PROPERTIES:
- ALOGV("Agc2SetParameter() properties gain %f, level %d margin %f",
- pProperties->fixedDigitalGain,
- pProperties->level_estimator,
- pProperties->extraSaturationMargin);
- effect->session->config.gain_controller2.fixed_digital.gain_db =
- pProperties->fixedDigitalGain;
- effect->session->config.gain_controller2.adaptive_digital.level_estimator =
- (webrtc::AudioProcessing::Config::GainController2::LevelEstimator)pProperties->
- level_estimator;
- effect->session->config.gain_controller2.adaptive_digital.extra_saturation_margin_db =
- pProperties->extraSaturationMargin;
- break;
- default:
- ALOGW("Agc2SetParameter() unknown param %08x value %08x", param, *(uint32_t *)pValue);
- status = -EINVAL;
- break;
+ case AGC2_PARAM_FIXED_DIGITAL_GAIN:
+ valueFloat = (float)(*(int32_t*)pValue);
+ ALOGV("Agc2SetParameter() fixed digital gain %f dB", valueFloat);
+ effect->session->config.gain_controller2.fixed_digital.gain_db = valueFloat;
+ break;
+ case AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR:
+ ALOGV("Agc2SetParameter() level estimator %d",
+ *(webrtc::AudioProcessing::Config::GainController2::LevelEstimator*)pValue);
+ effect->session->config.gain_controller2.adaptive_digital.level_estimator =
+ (*(webrtc::AudioProcessing::Config::GainController2::LevelEstimator*)pValue);
+ break;
+ case AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN:
+ valueFloat = (float)(*(int32_t*)pValue);
+ ALOGV("Agc2SetParameter() extra saturation margin %f dB", valueFloat);
+ effect->session->config.gain_controller2.adaptive_digital.extra_saturation_margin_db =
+ valueFloat;
+ break;
+ case AGC2_PARAM_PROPERTIES:
+ ALOGV("Agc2SetParameter() properties gain %f, level %d margin %f",
+ pProperties->fixedDigitalGain, pProperties->level_estimator,
+ pProperties->extraSaturationMargin);
+ effect->session->config.gain_controller2.fixed_digital.gain_db =
+ pProperties->fixedDigitalGain;
+ effect->session->config.gain_controller2.adaptive_digital.level_estimator =
+ (webrtc::AudioProcessing::Config::GainController2::LevelEstimator)
+ pProperties->level_estimator;
+ effect->session->config.gain_controller2.adaptive_digital.extra_saturation_margin_db =
+ pProperties->extraSaturationMargin;
+ break;
+ default:
+ ALOGW("Agc2SetParameter() unknown param %08x value %08x", param, *(uint32_t*)pValue);
+ status = -EINVAL;
+ break;
}
effect->session->apm->ApplyConfig(effect->session->config);
@@ -605,79 +569,72 @@
}
#endif
-int AgcSetParameter (preproc_effect_t *effect, void *pParam, void *pValue)
-{
+int AgcSetParameter(preproc_effect_t* effect, void* pParam, void* pValue) {
int status = 0;
#ifdef WEBRTC_LEGACY
- uint32_t param = *(uint32_t *)pParam;
- t_agc_settings *pProperties = (t_agc_settings *)pValue;
- webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
+ uint32_t param = *(uint32_t*)pParam;
+ t_agc_settings* pProperties = (t_agc_settings*)pValue;
+ webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
switch (param) {
- case AGC_PARAM_TARGET_LEVEL:
- ALOGV("AgcSetParameter() target level %d milliBels", *(int16_t *)pValue);
- status = agc->set_target_level_dbfs(-(*(int16_t *)pValue / 100));
- break;
- case AGC_PARAM_COMP_GAIN:
- ALOGV("AgcSetParameter() comp gain %d milliBels", *(int16_t *)pValue);
- status = agc->set_compression_gain_db(*(int16_t *)pValue / 100);
- break;
- case AGC_PARAM_LIMITER_ENA:
- ALOGV("AgcSetParameter() limiter enabled %s", *(bool *)pValue ? "true" : "false");
- status = agc->enable_limiter(*(bool *)pValue);
- break;
- case AGC_PARAM_PROPERTIES:
- ALOGV("AgcSetParameter() properties level %d, gain %d limiter %d",
- pProperties->targetLevel,
- pProperties->compGain,
- pProperties->limiterEnabled);
- status = agc->set_target_level_dbfs(-(pProperties->targetLevel / 100));
- if (status != 0) break;
- status = agc->set_compression_gain_db(pProperties->compGain / 100);
- if (status != 0) break;
- status = agc->enable_limiter(pProperties->limiterEnabled);
- break;
- default:
- ALOGW("AgcSetParameter() unknown param %08x value %08x", param, *(uint32_t *)pValue);
- status = -EINVAL;
- break;
+ case AGC_PARAM_TARGET_LEVEL:
+ ALOGV("AgcSetParameter() target level %d milliBels", *(int16_t*)pValue);
+ status = agc->set_target_level_dbfs(-(*(int16_t*)pValue / 100));
+ break;
+ case AGC_PARAM_COMP_GAIN:
+ ALOGV("AgcSetParameter() comp gain %d milliBels", *(int16_t*)pValue);
+ status = agc->set_compression_gain_db(*(int16_t*)pValue / 100);
+ break;
+ case AGC_PARAM_LIMITER_ENA:
+ ALOGV("AgcSetParameter() limiter enabled %s", *(bool*)pValue ? "true" : "false");
+ status = agc->enable_limiter(*(bool*)pValue);
+ break;
+ case AGC_PARAM_PROPERTIES:
+ ALOGV("AgcSetParameter() properties level %d, gain %d limiter %d",
+ pProperties->targetLevel, pProperties->compGain, pProperties->limiterEnabled);
+ status = agc->set_target_level_dbfs(-(pProperties->targetLevel / 100));
+ if (status != 0) break;
+ status = agc->set_compression_gain_db(pProperties->compGain / 100);
+ if (status != 0) break;
+ status = agc->enable_limiter(pProperties->limiterEnabled);
+ break;
+ default:
+ ALOGW("AgcSetParameter() unknown param %08x value %08x", param, *(uint32_t*)pValue);
+ status = -EINVAL;
+ break;
}
#else
- uint32_t param = *(uint32_t *)pParam;
- t_agc_settings *pProperties = (t_agc_settings *)pValue;
+ uint32_t param = *(uint32_t*)pParam;
+ t_agc_settings* pProperties = (t_agc_settings*)pValue;
effect->session->config = effect->session->apm->GetConfig();
switch (param) {
- case AGC_PARAM_TARGET_LEVEL:
- ALOGV("AgcSetParameter() target level %d milliBels", *(int16_t *)pValue);
- effect->session->config.gain_controller1.target_level_dbfs =
- (-(*(int16_t *)pValue / 100));
- break;
- case AGC_PARAM_COMP_GAIN:
- ALOGV("AgcSetParameter() comp gain %d milliBels", *(int16_t *)pValue);
- effect->session->config.gain_controller1.compression_gain_db =
- (*(int16_t *)pValue / 100);
- break;
- case AGC_PARAM_LIMITER_ENA:
- ALOGV("AgcSetParameter() limiter enabled %s", *(bool *)pValue ? "true" : "false");
- effect->session->config.gain_controller1.enable_limiter =
- (*(bool *)pValue);
- break;
- case AGC_PARAM_PROPERTIES:
- ALOGV("AgcSetParameter() properties level %d, gain %d limiter %d",
- pProperties->targetLevel,
- pProperties->compGain,
- pProperties->limiterEnabled);
- effect->session->config.gain_controller1.target_level_dbfs =
- -(pProperties->targetLevel / 100);
- effect->session->config.gain_controller1.compression_gain_db =
- pProperties->compGain / 100;
- effect->session->config.gain_controller1.enable_limiter =
- pProperties->limiterEnabled;
- break;
- default:
- ALOGW("AgcSetParameter() unknown param %08x value %08x", param, *(uint32_t *)pValue);
- status = -EINVAL;
- break;
+ case AGC_PARAM_TARGET_LEVEL:
+ ALOGV("AgcSetParameter() target level %d milliBels", *(int16_t*)pValue);
+ effect->session->config.gain_controller1.target_level_dbfs =
+ (-(*(int16_t*)pValue / 100));
+ break;
+ case AGC_PARAM_COMP_GAIN:
+ ALOGV("AgcSetParameter() comp gain %d milliBels", *(int16_t*)pValue);
+ effect->session->config.gain_controller1.compression_gain_db =
+ (*(int16_t*)pValue / 100);
+ break;
+ case AGC_PARAM_LIMITER_ENA:
+ ALOGV("AgcSetParameter() limiter enabled %s", *(bool*)pValue ? "true" : "false");
+ effect->session->config.gain_controller1.enable_limiter = (*(bool*)pValue);
+ break;
+ case AGC_PARAM_PROPERTIES:
+ ALOGV("AgcSetParameter() properties level %d, gain %d limiter %d",
+ pProperties->targetLevel, pProperties->compGain, pProperties->limiterEnabled);
+ effect->session->config.gain_controller1.target_level_dbfs =
+ -(pProperties->targetLevel / 100);
+ effect->session->config.gain_controller1.compression_gain_db =
+ pProperties->compGain / 100;
+ effect->session->config.gain_controller1.enable_limiter = pProperties->limiterEnabled;
+ break;
+ default:
+ ALOGW("AgcSetParameter() unknown param %08x value %08x", param, *(uint32_t*)pValue);
+ status = -EINVAL;
+ break;
}
effect->session->apm->ApplyConfig(effect->session->config);
#endif
@@ -688,18 +645,16 @@
}
#ifndef WEBRTC_LEGACY
-void Agc2Enable(preproc_effect_t *effect)
-{
+void Agc2Enable(preproc_effect_t* effect) {
effect->session->config = effect->session->apm->GetConfig();
effect->session->config.gain_controller2.enabled = true;
effect->session->apm->ApplyConfig(effect->session->config);
}
#endif
-void AgcEnable(preproc_effect_t *effect)
-{
+void AgcEnable(preproc_effect_t* effect) {
#ifdef WEBRTC_LEGACY
- webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
+ webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
ALOGV("AgcEnable agc %p", agc);
agc->Enable(true);
#else
@@ -710,19 +665,17 @@
}
#ifndef WEBRTC_LEGACY
-void Agc2Disable(preproc_effect_t *effect)
-{
+void Agc2Disable(preproc_effect_t* effect) {
effect->session->config = effect->session->apm->GetConfig();
effect->session->config.gain_controller2.enabled = false;
effect->session->apm->ApplyConfig(effect->session->config);
}
#endif
-void AgcDisable(preproc_effect_t *effect)
-{
+void AgcDisable(preproc_effect_t* effect) {
#ifdef WEBRTC_LEGACY
ALOGV("AgcDisable");
- webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
+ webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
agc->Enable(false);
#else
effect->session->config = effect->session->apm->GetConfig();
@@ -731,28 +684,13 @@
#endif
}
-static const preproc_ops_t sAgcOps = {
- AgcCreate,
- AgcInit,
- NULL,
- AgcEnable,
- AgcDisable,
- AgcSetParameter,
- AgcGetParameter,
- NULL
-};
+static const preproc_ops_t sAgcOps = {AgcCreate, AgcInit, NULL, AgcEnable, AgcDisable,
+ AgcSetParameter, AgcGetParameter, NULL};
#ifndef WEBRTC_LEGACY
-static const preproc_ops_t sAgc2Ops = {
- Agc2Create,
- Agc2Init,
- NULL,
- Agc2Enable,
- Agc2Disable,
- Agc2SetParameter,
- Agc2GetParameter,
- NULL
-};
+static const preproc_ops_t sAgc2Ops = {Agc2Create, Agc2Init, NULL,
+ Agc2Enable, Agc2Disable, Agc2SetParameter,
+ Agc2GetParameter, NULL};
#endif
//------------------------------------------------------------------------------
@@ -765,26 +703,23 @@
static const bool kAecDefaultComfortNoise = true;
#endif
-int AecInit (preproc_effect_t *effect)
-{
+int AecInit(preproc_effect_t* effect) {
ALOGV("AecInit");
#ifdef WEBRTC_LEGACY
- webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
+ webrtc::EchoControlMobile* aec = static_cast<webrtc::EchoControlMobile*>(effect->engine);
aec->set_routing_mode(kAecDefaultMode);
aec->enable_comfort_noise(kAecDefaultComfortNoise);
#else
- effect->session->config =
- effect->session->apm->GetConfig() ;
+ effect->session->config = effect->session->apm->GetConfig();
effect->session->config.echo_canceller.mobile_mode = true;
effect->session->apm->ApplyConfig(effect->session->config);
#endif
return 0;
}
-int AecCreate(preproc_effect_t *effect)
-{
+int AecCreate(preproc_effect_t* effect) {
#ifdef WEBRTC_LEGACY
- webrtc::EchoControlMobile *aec = effect->session->apm->echo_control_mobile();
+ webrtc::EchoControlMobile* aec = effect->session->apm->echo_control_mobile();
ALOGV("AecCreate got aec %p", aec);
if (aec == NULL) {
ALOGW("AgcCreate Error");
@@ -792,76 +727,68 @@
}
effect->engine = static_cast<preproc_fx_handle_t>(aec);
#endif
- AecInit (effect);
+ AecInit(effect);
return 0;
}
-int AecGetParameter(preproc_effect_t *effect,
- void *pParam,
- uint32_t *pValueSize,
- void *pValue)
-{
+int AecGetParameter(preproc_effect_t* effect, void* pParam, uint32_t* pValueSize, void* pValue) {
int status = 0;
- uint32_t param = *(uint32_t *)pParam;
+ uint32_t param = *(uint32_t*)pParam;
if (*pValueSize < sizeof(uint32_t)) {
return -EINVAL;
}
switch (param) {
- case AEC_PARAM_ECHO_DELAY:
- case AEC_PARAM_PROPERTIES:
- *(uint32_t *)pValue = 1000 * effect->session->apm->stream_delay_ms();
- ALOGV("AecGetParameter() echo delay %d us", *(uint32_t *)pValue);
- break;
+ case AEC_PARAM_ECHO_DELAY:
+ case AEC_PARAM_PROPERTIES:
+ *(uint32_t*)pValue = 1000 * effect->session->apm->stream_delay_ms();
+ ALOGV("AecGetParameter() echo delay %d us", *(uint32_t*)pValue);
+ break;
#ifndef WEBRTC_LEGACY
- case AEC_PARAM_MOBILE_MODE:
- effect->session->config =
- effect->session->apm->GetConfig() ;
- *(uint32_t *)pValue = effect->session->config.echo_canceller.mobile_mode;
- ALOGV("AecGetParameter() mobile mode %d us", *(uint32_t *)pValue);
- break;
+ case AEC_PARAM_MOBILE_MODE:
+ effect->session->config = effect->session->apm->GetConfig();
+ *(uint32_t*)pValue = effect->session->config.echo_canceller.mobile_mode;
+ ALOGV("AecGetParameter() mobile mode %d us", *(uint32_t*)pValue);
+ break;
#endif
- default:
- ALOGW("AecGetParameter() unknown param %08x value %08x", param, *(uint32_t *)pValue);
- status = -EINVAL;
- break;
+ default:
+ ALOGW("AecGetParameter() unknown param %08x value %08x", param, *(uint32_t*)pValue);
+ status = -EINVAL;
+ break;
}
return status;
}
-int AecSetParameter (preproc_effect_t *effect, void *pParam, void *pValue)
-{
+int AecSetParameter(preproc_effect_t* effect, void* pParam, void* pValue) {
int status = 0;
- uint32_t param = *(uint32_t *)pParam;
- uint32_t value = *(uint32_t *)pValue;
+ uint32_t param = *(uint32_t*)pParam;
+ uint32_t value = *(uint32_t*)pValue;
switch (param) {
- case AEC_PARAM_ECHO_DELAY:
- case AEC_PARAM_PROPERTIES:
- status = effect->session->apm->set_stream_delay_ms(value/1000);
- ALOGV("AecSetParameter() echo delay %d us, status %d", value, status);
- break;
+ case AEC_PARAM_ECHO_DELAY:
+ case AEC_PARAM_PROPERTIES:
+ status = effect->session->apm->set_stream_delay_ms(value / 1000);
+ ALOGV("AecSetParameter() echo delay %d us, status %d", value, status);
+ break;
#ifndef WEBRTC_LEGACY
- case AEC_PARAM_MOBILE_MODE:
- effect->session->config =
- effect->session->apm->GetConfig() ;
- effect->session->config.echo_canceller.mobile_mode = value;
- ALOGV("AecSetParameter() mobile mode %d us", value);
- effect->session->apm->ApplyConfig(effect->session->config);
- break;
+ case AEC_PARAM_MOBILE_MODE:
+ effect->session->config = effect->session->apm->GetConfig();
+ effect->session->config.echo_canceller.mobile_mode = value;
+ ALOGV("AecSetParameter() mobile mode %d us", value);
+ effect->session->apm->ApplyConfig(effect->session->config);
+ break;
#endif
- default:
- ALOGW("AecSetParameter() unknown param %08x value %08x", param, *(uint32_t *)pValue);
- status = -EINVAL;
- break;
+ default:
+ ALOGW("AecSetParameter() unknown param %08x value %08x", param, *(uint32_t*)pValue);
+ status = -EINVAL;
+ break;
}
return status;
}
-void AecEnable(preproc_effect_t *effect)
-{
+void AecEnable(preproc_effect_t* effect) {
#ifdef WEBRTC_LEGACY
- webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
+ webrtc::EchoControlMobile* aec = static_cast<webrtc::EchoControlMobile*>(effect->engine);
ALOGV("AecEnable aec %p", aec);
aec->Enable(true);
#else
@@ -871,11 +798,10 @@
#endif
}
-void AecDisable(preproc_effect_t *effect)
-{
+void AecDisable(preproc_effect_t* effect) {
#ifdef WEBRTC_LEGACY
ALOGV("AecDisable");
- webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
+ webrtc::EchoControlMobile* aec = static_cast<webrtc::EchoControlMobile*>(effect->engine);
aec->Enable(false);
#else
effect->session->config = effect->session->apm->GetConfig();
@@ -884,12 +810,12 @@
#endif
}
-int AecSetDevice(preproc_effect_t *effect, uint32_t device)
-{
+int AecSetDevice(preproc_effect_t* effect, uint32_t device) {
ALOGV("AecSetDevice %08x", device);
#ifdef WEBRTC_LEGACY
- webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
- webrtc::EchoControlMobile::RoutingMode mode = webrtc::EchoControlMobile::kQuietEarpieceOrHeadset;
+ webrtc::EchoControlMobile* aec = static_cast<webrtc::EchoControlMobile*>(effect->engine);
+ webrtc::EchoControlMobile::RoutingMode mode =
+ webrtc::EchoControlMobile::kQuietEarpieceOrHeadset;
#endif
if (audio_is_input_device(device)) {
@@ -897,34 +823,27 @@
}
#ifdef WEBRTC_LEGACY
- switch(device) {
- case AUDIO_DEVICE_OUT_EARPIECE:
- mode = webrtc::EchoControlMobile::kEarpiece;
- break;
- case AUDIO_DEVICE_OUT_SPEAKER:
- mode = webrtc::EchoControlMobile::kSpeakerphone;
- break;
- case AUDIO_DEVICE_OUT_WIRED_HEADSET:
- case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
- case AUDIO_DEVICE_OUT_USB_HEADSET:
- default:
- break;
+ switch (device) {
+ case AUDIO_DEVICE_OUT_EARPIECE:
+ mode = webrtc::EchoControlMobile::kEarpiece;
+ break;
+ case AUDIO_DEVICE_OUT_SPEAKER:
+ mode = webrtc::EchoControlMobile::kSpeakerphone;
+ break;
+ case AUDIO_DEVICE_OUT_WIRED_HEADSET:
+ case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
+ case AUDIO_DEVICE_OUT_USB_HEADSET:
+ default:
+ break;
}
aec->set_routing_mode(mode);
#endif
return 0;
}
-static const preproc_ops_t sAecOps = {
- AecCreate,
- AecInit,
- NULL,
- AecEnable,
- AecDisable,
- AecSetParameter,
- AecGetParameter,
- AecSetDevice
-};
+static const preproc_ops_t sAecOps = {AecCreate, AecInit, NULL,
+ AecEnable, AecDisable, AecSetParameter,
+ AecGetParameter, AecSetDevice};
//------------------------------------------------------------------------------
// Noise Suppression (NS)
@@ -934,14 +853,13 @@
static const webrtc::NoiseSuppression::Level kNsDefaultLevel = webrtc::NoiseSuppression::kModerate;
#else
static const webrtc::AudioProcessing::Config::NoiseSuppression::Level kNsDefaultLevel =
- webrtc::AudioProcessing::Config::NoiseSuppression::kModerate;
+ webrtc::AudioProcessing::Config::NoiseSuppression::kModerate;
#endif
-int NsInit (preproc_effect_t *effect)
-{
+int NsInit(preproc_effect_t* effect) {
ALOGV("NsInit");
#ifdef WEBRTC_LEGACY
- webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
+ webrtc::NoiseSuppression* ns = static_cast<webrtc::NoiseSuppression*>(effect->engine);
ns->set_level(kNsDefaultLevel);
webrtc::Config config;
std::vector<webrtc::Point> geometry;
@@ -951,27 +869,22 @@
geometry.push_back(webrtc::Point(0.01f, 0.f, 0.f));
geometry.push_back(webrtc::Point(0.03f, 0.f, 0.f));
// The geometry needs to be set with Beamforming enabled.
- config.Set<webrtc::Beamforming>(
- new webrtc::Beamforming(true, geometry));
+ config.Set<webrtc::Beamforming>(new webrtc::Beamforming(true, geometry));
effect->session->apm->SetExtraOptions(config);
- config.Set<webrtc::Beamforming>(
- new webrtc::Beamforming(false, geometry));
+ config.Set<webrtc::Beamforming>(new webrtc::Beamforming(false, geometry));
effect->session->apm->SetExtraOptions(config);
#else
- effect->session->config =
- effect->session->apm->GetConfig() ;
- effect->session->config.noise_suppression.level =
- kNsDefaultLevel;
+ effect->session->config = effect->session->apm->GetConfig();
+ effect->session->config.noise_suppression.level = kNsDefaultLevel;
effect->session->apm->ApplyConfig(effect->session->config);
#endif
effect->type = NS_TYPE_SINGLE_CHANNEL;
return 0;
}
-int NsCreate(preproc_effect_t *effect)
-{
+int NsCreate(preproc_effect_t* effect) {
#ifdef WEBRTC_LEGACY
- webrtc::NoiseSuppression *ns = effect->session->apm->noise_suppression();
+ webrtc::NoiseSuppression* ns = effect->session->apm->noise_suppression();
ALOGV("NsCreate got ns %p", ns);
if (ns == NULL) {
ALOGW("AgcCreate Error");
@@ -979,37 +892,31 @@
}
effect->engine = static_cast<preproc_fx_handle_t>(ns);
#endif
- NsInit (effect);
+ NsInit(effect);
return 0;
}
-int NsGetParameter(preproc_effect_t *effect __unused,
- void *pParam __unused,
- uint32_t *pValueSize __unused,
- void *pValue __unused)
-{
+int NsGetParameter(preproc_effect_t* effect __unused, void* pParam __unused,
+ uint32_t* pValueSize __unused, void* pValue __unused) {
int status = 0;
return status;
}
-int NsSetParameter (preproc_effect_t *effect, void *pParam, void *pValue)
-{
+int NsSetParameter(preproc_effect_t* effect, void* pParam, void* pValue) {
int status = 0;
#ifdef WEBRTC_LEGACY
- webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
- uint32_t param = *(uint32_t *)pParam;
- uint32_t value = *(uint32_t *)pValue;
- switch(param) {
+ webrtc::NoiseSuppression* ns = static_cast<webrtc::NoiseSuppression*>(effect->engine);
+ uint32_t param = *(uint32_t*)pParam;
+ uint32_t value = *(uint32_t*)pValue;
+ switch (param) {
case NS_PARAM_LEVEL:
ns->set_level((webrtc::NoiseSuppression::Level)value);
ALOGV("NsSetParameter() level %d", value);
break;
- case NS_PARAM_TYPE:
- {
+ case NS_PARAM_TYPE: {
webrtc::Config config;
std::vector<webrtc::Point> geometry;
- bool is_beamforming_enabled =
- value == NS_TYPE_MULTI_CHANNEL && ns->is_enabled();
+ bool is_beamforming_enabled = value == NS_TYPE_MULTI_CHANNEL && ns->is_enabled();
config.Set<webrtc::Beamforming>(
new webrtc::Beamforming(is_beamforming_enabled, geometry));
effect->session->apm->SetExtraOptions(config);
@@ -1022,14 +929,13 @@
status = -EINVAL;
}
#else
- uint32_t param = *(uint32_t *)pParam;
- uint32_t value = *(uint32_t *)pValue;
- effect->session->config =
- effect->session->apm->GetConfig();
+ uint32_t param = *(uint32_t*)pParam;
+ uint32_t value = *(uint32_t*)pValue;
+ effect->session->config = effect->session->apm->GetConfig();
switch (param) {
case NS_PARAM_LEVEL:
effect->session->config.noise_suppression.level =
- (webrtc::AudioProcessing::Config::NoiseSuppression::Level)value;
+ (webrtc::AudioProcessing::Config::NoiseSuppression::Level)value;
ALOGV("NsSetParameter() level %d", value);
break;
default:
@@ -1042,10 +948,9 @@
return status;
}
-void NsEnable(preproc_effect_t *effect)
-{
+void NsEnable(preproc_effect_t* effect) {
#ifdef WEBRTC_LEGACY
- webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
+ webrtc::NoiseSuppression* ns = static_cast<webrtc::NoiseSuppression*>(effect->engine);
ALOGV("NsEnable ns %p", ns);
ns->Enable(true);
if (effect->type == NS_TYPE_MULTI_CHANNEL) {
@@ -1055,137 +960,118 @@
effect->session->apm->SetExtraOptions(config);
}
#else
- effect->session->config =
- effect->session->apm->GetConfig();
+ effect->session->config = effect->session->apm->GetConfig();
effect->session->config.noise_suppression.enabled = true;
effect->session->apm->ApplyConfig(effect->session->config);
#endif
}
-void NsDisable(preproc_effect_t *effect)
-{
+void NsDisable(preproc_effect_t* effect) {
ALOGV("NsDisable");
#ifdef WEBRTC_LEGACY
- webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
+ webrtc::NoiseSuppression* ns = static_cast<webrtc::NoiseSuppression*>(effect->engine);
ns->Enable(false);
webrtc::Config config;
std::vector<webrtc::Point> geometry;
config.Set<webrtc::Beamforming>(new webrtc::Beamforming(false, geometry));
effect->session->apm->SetExtraOptions(config);
#else
- effect->session->config =
- effect->session->apm->GetConfig();
+ effect->session->config = effect->session->apm->GetConfig();
effect->session->config.noise_suppression.enabled = false;
effect->session->apm->ApplyConfig(effect->session->config);
#endif
}
-static const preproc_ops_t sNsOps = {
- NsCreate,
- NsInit,
- NULL,
- NsEnable,
- NsDisable,
- NsSetParameter,
- NsGetParameter,
- NULL
-};
+static const preproc_ops_t sNsOps = {NsCreate, NsInit, NULL, NsEnable,
+ NsDisable, NsSetParameter, NsGetParameter, NULL};
-
-
-static const preproc_ops_t *sPreProcOps[PREPROC_NUM_EFFECTS] = {
- &sAgcOps,
+static const preproc_ops_t* sPreProcOps[PREPROC_NUM_EFFECTS] = {&sAgcOps,
#ifndef WEBRTC_LEGACY
- &sAgc2Ops,
+ &sAgc2Ops,
#endif
- &sAecOps,
- &sNsOps
-};
-
+ &sAecOps, &sNsOps};
//------------------------------------------------------------------------------
// Effect functions
//------------------------------------------------------------------------------
-void Session_SetProcEnabled(preproc_session_t *session, uint32_t procId, bool enabled);
+void Session_SetProcEnabled(preproc_session_t* session, uint32_t procId, bool enabled);
extern "C" const struct effect_interface_s sEffectInterface;
extern "C" const struct effect_interface_s sEffectInterfaceReverse;
-#define BAD_STATE_ABORT(from, to) \
- LOG_ALWAYS_FATAL("Bad state transition from %d to %d", from, to);
+#define BAD_STATE_ABORT(from, to) LOG_ALWAYS_FATAL("Bad state transition from %d to %d", from, to);
-int Effect_SetState(preproc_effect_t *effect, uint32_t state)
-{
+int Effect_SetState(preproc_effect_t* effect, uint32_t state) {
int status = 0;
ALOGV("Effect_SetState proc %d, new %d old %d", effect->procId, state, effect->state);
- switch(state) {
- case PREPROC_EFFECT_STATE_INIT:
- switch(effect->state) {
- case PREPROC_EFFECT_STATE_ACTIVE:
- effect->ops->disable(effect);
- Session_SetProcEnabled(effect->session, effect->procId, false);
+ switch (state) {
+ case PREPROC_EFFECT_STATE_INIT:
+ switch (effect->state) {
+ case PREPROC_EFFECT_STATE_ACTIVE:
+ effect->ops->disable(effect);
+ Session_SetProcEnabled(effect->session, effect->procId, false);
+ break;
+ case PREPROC_EFFECT_STATE_CONFIG:
+ case PREPROC_EFFECT_STATE_CREATED:
+ case PREPROC_EFFECT_STATE_INIT:
+ break;
+ default:
+ BAD_STATE_ABORT(effect->state, state);
+ }
+ break;
+ case PREPROC_EFFECT_STATE_CREATED:
+ switch (effect->state) {
+ case PREPROC_EFFECT_STATE_INIT:
+ status = effect->ops->create(effect);
+ break;
+ case PREPROC_EFFECT_STATE_CREATED:
+ case PREPROC_EFFECT_STATE_ACTIVE:
+ case PREPROC_EFFECT_STATE_CONFIG:
+ ALOGE("Effect_SetState invalid transition");
+ status = -ENOSYS;
+ break;
+ default:
+ BAD_STATE_ABORT(effect->state, state);
+ }
break;
case PREPROC_EFFECT_STATE_CONFIG:
- case PREPROC_EFFECT_STATE_CREATED:
- case PREPROC_EFFECT_STATE_INIT:
+ switch (effect->state) {
+ case PREPROC_EFFECT_STATE_INIT:
+ ALOGE("Effect_SetState invalid transition");
+ status = -ENOSYS;
+ break;
+ case PREPROC_EFFECT_STATE_ACTIVE:
+ effect->ops->disable(effect);
+ Session_SetProcEnabled(effect->session, effect->procId, false);
+ break;
+ case PREPROC_EFFECT_STATE_CREATED:
+ case PREPROC_EFFECT_STATE_CONFIG:
+ break;
+ default:
+ BAD_STATE_ABORT(effect->state, state);
+ }
+ break;
+ case PREPROC_EFFECT_STATE_ACTIVE:
+ switch (effect->state) {
+ case PREPROC_EFFECT_STATE_INIT:
+ case PREPROC_EFFECT_STATE_CREATED:
+ ALOGE("Effect_SetState invalid transition");
+ status = -ENOSYS;
+ break;
+ case PREPROC_EFFECT_STATE_ACTIVE:
+ // enabling an already enabled effect is just ignored
+ break;
+ case PREPROC_EFFECT_STATE_CONFIG:
+ effect->ops->enable(effect);
+ Session_SetProcEnabled(effect->session, effect->procId, true);
+ break;
+ default:
+ BAD_STATE_ABORT(effect->state, state);
+ }
break;
default:
BAD_STATE_ABORT(effect->state, state);
- }
- break;
- case PREPROC_EFFECT_STATE_CREATED:
- switch(effect->state) {
- case PREPROC_EFFECT_STATE_INIT:
- status = effect->ops->create(effect);
- break;
- case PREPROC_EFFECT_STATE_CREATED:
- case PREPROC_EFFECT_STATE_ACTIVE:
- case PREPROC_EFFECT_STATE_CONFIG:
- ALOGE("Effect_SetState invalid transition");
- status = -ENOSYS;
- break;
- default:
- BAD_STATE_ABORT(effect->state, state);
- }
- break;
- case PREPROC_EFFECT_STATE_CONFIG:
- switch(effect->state) {
- case PREPROC_EFFECT_STATE_INIT:
- ALOGE("Effect_SetState invalid transition");
- status = -ENOSYS;
- break;
- case PREPROC_EFFECT_STATE_ACTIVE:
- effect->ops->disable(effect);
- Session_SetProcEnabled(effect->session, effect->procId, false);
- break;
- case PREPROC_EFFECT_STATE_CREATED:
- case PREPROC_EFFECT_STATE_CONFIG:
- break;
- default:
- BAD_STATE_ABORT(effect->state, state);
- }
- break;
- case PREPROC_EFFECT_STATE_ACTIVE:
- switch(effect->state) {
- case PREPROC_EFFECT_STATE_INIT:
- case PREPROC_EFFECT_STATE_CREATED:
- ALOGE("Effect_SetState invalid transition");
- status = -ENOSYS;
- break;
- case PREPROC_EFFECT_STATE_ACTIVE:
- // enabling an already enabled effect is just ignored
- break;
- case PREPROC_EFFECT_STATE_CONFIG:
- effect->ops->enable(effect);
- Session_SetProcEnabled(effect->session, effect->procId, true);
- break;
- default:
- BAD_STATE_ABORT(effect->state, state);
- }
- break;
- default:
- BAD_STATE_ABORT(effect->state, state);
}
if (status == 0) {
effect->state = state;
@@ -1193,8 +1079,7 @@
return status;
}
-int Effect_Init(preproc_effect_t *effect, uint32_t procId)
-{
+int Effect_Init(preproc_effect_t* effect, uint32_t procId) {
if (HasReverseStream(procId)) {
effect->itfe = &sEffectInterfaceReverse;
} else {
@@ -1206,21 +1091,17 @@
return 0;
}
-int Effect_Create(preproc_effect_t *effect,
- preproc_session_t *session,
- effect_handle_t *interface)
-{
+int Effect_Create(preproc_effect_t* effect, preproc_session_t* session,
+ effect_handle_t* interface) {
effect->session = session;
*interface = (effect_handle_t)&effect->itfe;
return Effect_SetState(effect, PREPROC_EFFECT_STATE_CREATED);
}
-int Effect_Release(preproc_effect_t *effect)
-{
+int Effect_Release(preproc_effect_t* effect) {
return Effect_SetState(effect, PREPROC_EFFECT_STATE_INIT);
}
-
//------------------------------------------------------------------------------
// Session functions
//------------------------------------------------------------------------------
@@ -1230,8 +1111,7 @@
static const int kPreprocDefaultSr = 16000;
static const int kPreProcDefaultCnl = 1;
-int Session_Init(preproc_session_t *session)
-{
+int Session_Init(preproc_session_t* session) {
size_t i;
int status = 0;
@@ -1248,11 +1128,8 @@
return status;
}
-
-extern "C" int Session_CreateEffect(preproc_session_t *session,
- int32_t procId,
- effect_handle_t *interface)
-{
+extern "C" int Session_CreateEffect(preproc_session_t* session, int32_t procId,
+ effect_handle_t* interface) {
int status = -ENOMEM;
ALOGV("Session_CreateEffect procId %d, createdMsk %08x", procId, session->createdMsk);
@@ -1265,10 +1142,10 @@
goto error;
}
const webrtc::ProcessingConfig processing_config = {
- {{kPreprocDefaultSr, kPreProcDefaultCnl},
- {kPreprocDefaultSr, kPreProcDefaultCnl},
- {kPreprocDefaultSr, kPreProcDefaultCnl},
- {kPreprocDefaultSr, kPreProcDefaultCnl}}};
+ {{kPreprocDefaultSr, kPreProcDefaultCnl},
+ {kPreprocDefaultSr, kPreProcDefaultCnl},
+ {kPreprocDefaultSr, kPreProcDefaultCnl},
+ {kPreprocDefaultSr, kPreProcDefaultCnl}}};
session->apm->Initialize(processing_config);
session->procFrame = new webrtc::AudioFrame();
if (session->procFrame == NULL) {
@@ -1335,7 +1212,7 @@
goto error;
}
ALOGV("Session_CreateEffect OK");
- session->createdMsk |= (1<<procId);
+ session->createdMsk |= (1 << procId);
return status;
error:
@@ -1346,7 +1223,7 @@
delete session->procFrame;
session->procFrame = NULL;
delete session->apm;
- session->apm = NULL; // NOLINT(clang-analyzer-cplusplus.NewDelete)
+ session->apm = NULL; // NOLINT(clang-analyzer-cplusplus.NewDelete)
#else
delete session->apm;
session->apm = NULL;
@@ -1355,11 +1232,9 @@
return status;
}
-int Session_ReleaseEffect(preproc_session_t *session,
- preproc_effect_t *fx)
-{
+int Session_ReleaseEffect(preproc_session_t* session, preproc_effect_t* fx) {
ALOGW_IF(Effect_Release(fx) != 0, " Effect_Release() failed for proc ID %d", fx->procId);
- session->createdMsk &= ~(1<<fx->procId);
+ session->createdMsk &= ~(1 << fx->procId);
if (session->createdMsk == 0) {
#ifdef WEBRTC_LEGACY
delete session->apm;
@@ -1397,9 +1272,7 @@
return 0;
}
-
-int Session_SetConfig(preproc_session_t *session, effect_config_t *config)
-{
+int Session_SetConfig(preproc_session_t* session, effect_config_t* config) {
uint32_t inCnl = audio_channel_count_from_in_mask(config->inputCfg.channels);
uint32_t outCnl = audio_channel_count_from_in_mask(config->outputCfg.channels);
@@ -1409,8 +1282,8 @@
return -EINVAL;
}
- ALOGV("Session_SetConfig sr %d cnl %08x",
- config->inputCfg.samplingRate, config->inputCfg.channels);
+ ALOGV("Session_SetConfig sr %d cnl %08x", config->inputCfg.samplingRate,
+ config->inputCfg.channels);
#ifdef WEBRTC_LEGACY
int status;
#endif
@@ -1418,8 +1291,7 @@
// AEC implementation is limited to 16kHz
if (config->inputCfg.samplingRate >= 32000 && !(session->createdMsk & (1 << PREPROC_AEC))) {
session->apmSamplingRate = 32000;
- } else
- if (config->inputCfg.samplingRate >= 16000) {
+ } else if (config->inputCfg.samplingRate >= 16000) {
session->apmSamplingRate = 16000;
} else if (config->inputCfg.samplingRate >= 8000) {
session->apmSamplingRate = 8000;
@@ -1427,10 +1299,10 @@
#ifdef WEBRTC_LEGACY
const webrtc::ProcessingConfig processing_config = {
- {{static_cast<int>(session->apmSamplingRate), inCnl},
- {static_cast<int>(session->apmSamplingRate), outCnl},
- {static_cast<int>(session->apmSamplingRate), inCnl},
- {static_cast<int>(session->apmSamplingRate), inCnl}}};
+ {{static_cast<int>(session->apmSamplingRate), inCnl},
+ {static_cast<int>(session->apmSamplingRate), outCnl},
+ {static_cast<int>(session->apmSamplingRate), inCnl},
+ {static_cast<int>(session->apmSamplingRate), inCnl}}};
status = session->apm->Initialize(processing_config);
if (status < 0) {
return -EINVAL;
@@ -1443,11 +1315,11 @@
session->frameCount = session->apmFrameCount;
} else {
#ifdef WEBRTC_LEGACY
- session->frameCount = (session->apmFrameCount * session->samplingRate) /
- session->apmSamplingRate + 1;
+ session->frameCount =
+ (session->apmFrameCount * session->samplingRate) / session->apmSamplingRate + 1;
#else
- session->frameCount = (session->apmFrameCount * session->samplingRate) /
- session->apmSamplingRate;
+ session->frameCount =
+ (session->apmFrameCount * session->samplingRate) / session->apmSamplingRate;
#endif
}
session->inChannelCount = inCnl;
@@ -1477,7 +1349,6 @@
session->framesIn = 0;
session->framesOut = 0;
-
#ifdef WEBRTC_LEGACY
if (session->inResampler != NULL) {
speex_resampler_destroy(session->inResampler);
@@ -1493,36 +1364,30 @@
}
if (session->samplingRate != session->apmSamplingRate) {
int error;
- session->inResampler = speex_resampler_init(session->inChannelCount,
- session->samplingRate,
- session->apmSamplingRate,
- RESAMPLER_QUALITY,
- &error);
+ session->inResampler =
+ speex_resampler_init(session->inChannelCount, session->samplingRate,
+ session->apmSamplingRate, RESAMPLER_QUALITY, &error);
if (session->inResampler == NULL) {
ALOGW("Session_SetConfig Cannot create speex resampler: %s",
- speex_resampler_strerror(error));
+ speex_resampler_strerror(error));
return -EINVAL;
}
- session->outResampler = speex_resampler_init(session->outChannelCount,
- session->apmSamplingRate,
- session->samplingRate,
- RESAMPLER_QUALITY,
- &error);
+ session->outResampler =
+ speex_resampler_init(session->outChannelCount, session->apmSamplingRate,
+ session->samplingRate, RESAMPLER_QUALITY, &error);
if (session->outResampler == NULL) {
ALOGW("Session_SetConfig Cannot create speex resampler: %s",
- speex_resampler_strerror(error));
+ speex_resampler_strerror(error));
speex_resampler_destroy(session->inResampler);
session->inResampler = NULL;
return -EINVAL;
}
- session->revResampler = speex_resampler_init(session->inChannelCount,
- session->samplingRate,
- session->apmSamplingRate,
- RESAMPLER_QUALITY,
- &error);
+ session->revResampler =
+ speex_resampler_init(session->inChannelCount, session->samplingRate,
+ session->apmSamplingRate, RESAMPLER_QUALITY, &error);
if (session->revResampler == NULL) {
ALOGW("Session_SetConfig Cannot create speex resampler: %s",
- speex_resampler_strerror(error));
+ speex_resampler_strerror(error));
speex_resampler_destroy(session->inResampler);
session->inResampler = NULL;
speex_resampler_destroy(session->outResampler);
@@ -1536,8 +1401,7 @@
return 0;
}
-void Session_GetConfig(preproc_session_t *session, effect_config_t *config)
-{
+void Session_GetConfig(preproc_session_t* session, effect_config_t* config) {
memset(config, 0, sizeof(effect_config_t));
config->inputCfg.samplingRate = config->outputCfg.samplingRate = session->samplingRate;
config->inputCfg.format = config->outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
@@ -1548,31 +1412,30 @@
(EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS | EFFECT_CONFIG_FORMAT);
}
-int Session_SetReverseConfig(preproc_session_t *session, effect_config_t *config)
-{
+int Session_SetReverseConfig(preproc_session_t* session, effect_config_t* config) {
if (config->inputCfg.samplingRate != config->outputCfg.samplingRate ||
- config->inputCfg.format != config->outputCfg.format ||
- config->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT) {
+ config->inputCfg.format != config->outputCfg.format ||
+ config->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT) {
return -EINVAL;
}
- ALOGV("Session_SetReverseConfig sr %d cnl %08x",
- config->inputCfg.samplingRate, config->inputCfg.channels);
+ ALOGV("Session_SetReverseConfig sr %d cnl %08x", config->inputCfg.samplingRate,
+ config->inputCfg.channels);
if (session->state < PREPROC_SESSION_STATE_CONFIG) {
return -ENOSYS;
}
if (config->inputCfg.samplingRate != session->samplingRate ||
- config->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT) {
+ config->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT) {
return -EINVAL;
}
uint32_t inCnl = audio_channel_count_from_out_mask(config->inputCfg.channels);
#ifdef WEBRTC_LEGACY
const webrtc::ProcessingConfig processing_config = {
- {{static_cast<int>(session->apmSamplingRate), session->inChannelCount},
- {static_cast<int>(session->apmSamplingRate), session->outChannelCount},
- {static_cast<int>(session->apmSamplingRate), inCnl},
- {static_cast<int>(session->apmSamplingRate), inCnl}}};
+ {{static_cast<int>(session->apmSamplingRate), session->inChannelCount},
+ {static_cast<int>(session->apmSamplingRate), session->outChannelCount},
+ {static_cast<int>(session->apmSamplingRate), inCnl},
+ {static_cast<int>(session->apmSamplingRate), inCnl}}};
int status = session->apm->Initialize(processing_config);
if (status < 0) {
return -EINVAL;
@@ -1590,8 +1453,7 @@
return 0;
}
-void Session_GetReverseConfig(preproc_session_t *session, effect_config_t *config)
-{
+void Session_GetReverseConfig(preproc_session_t* session, effect_config_t* config) {
memset(config, 0, sizeof(effect_config_t));
config->inputCfg.samplingRate = config->outputCfg.samplingRate = session->samplingRate;
config->inputCfg.format = config->outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
@@ -1601,10 +1463,9 @@
(EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS | EFFECT_CONFIG_FORMAT);
}
-void Session_SetProcEnabled(preproc_session_t *session, uint32_t procId, bool enabled)
-{
+void Session_SetProcEnabled(preproc_session_t* session, uint32_t procId, bool enabled) {
if (enabled) {
- if(session->enabledMsk == 0) {
+ if (session->enabledMsk == 0) {
session->framesIn = 0;
#ifdef WEBRTC_LEGACY
if (session->inResampler != NULL) {
@@ -1632,8 +1493,8 @@
session->revEnabledMsk &= ~(1 << procId);
}
}
- ALOGV("Session_SetProcEnabled proc %d, enabled %d enabledMsk %08x revEnabledMsk %08x",
- procId, enabled, session->enabledMsk, session->revEnabledMsk);
+ ALOGV("Session_SetProcEnabled proc %d, enabled %d enabledMsk %08x revEnabledMsk %08x", procId,
+ enabled, session->enabledMsk, session->revEnabledMsk);
session->processedMsk = 0;
if (HasReverseStream(procId)) {
session->revProcessedMsk = 0;
@@ -1647,8 +1508,7 @@
static int sInitStatus = 1;
static preproc_session_t sSessions[PREPROC_NUM_SESSIONS];
-preproc_session_t *PreProc_GetSession(int32_t procId, int32_t sessionId, int32_t ioId)
-{
+preproc_session_t* PreProc_GetSession(int32_t procId, int32_t sessionId, int32_t ioId) {
size_t i;
for (i = 0; i < PREPROC_NUM_SESSIONS; i++) {
if (sSessions[i].id == sessionId) {
@@ -1668,7 +1528,6 @@
return NULL;
}
-
int PreProc_Init() {
size_t i;
int status = 0;
@@ -1683,8 +1542,7 @@
return sInitStatus;
}
-const effect_descriptor_t *PreProc_GetDescriptor(const effect_uuid_t *uuid)
-{
+const effect_descriptor_t* PreProc_GetDescriptor(const effect_uuid_t* uuid) {
size_t i;
for (i = 0; i < PREPROC_NUM_EFFECTS; i++) {
if (memcmp(&sDescriptors[i]->uuid, uuid, sizeof(effect_uuid_t)) == 0) {
@@ -1694,35 +1552,31 @@
return NULL;
}
-
extern "C" {
//------------------------------------------------------------------------------
// Effect Control Interface Implementation
//------------------------------------------------------------------------------
-int PreProcessingFx_Process(effect_handle_t self,
- audio_buffer_t *inBuffer,
- audio_buffer_t *outBuffer)
-{
- preproc_effect_t * effect = (preproc_effect_t *)self;
+int PreProcessingFx_Process(effect_handle_t self, audio_buffer_t* inBuffer,
+ audio_buffer_t* outBuffer) {
+ preproc_effect_t* effect = (preproc_effect_t*)self;
- if (effect == NULL){
+ if (effect == NULL) {
ALOGV("PreProcessingFx_Process() ERROR effect == NULL");
return -EINVAL;
}
- preproc_session_t * session = (preproc_session_t *)effect->session;
+ preproc_session_t* session = (preproc_session_t*)effect->session;
- if (inBuffer == NULL || inBuffer->raw == NULL ||
- outBuffer == NULL || outBuffer->raw == NULL){
+ if (inBuffer == NULL || inBuffer->raw == NULL || outBuffer == NULL || outBuffer->raw == NULL) {
ALOGW("PreProcessingFx_Process() ERROR bad pointer");
return -EINVAL;
}
- session->processedMsk |= (1<<effect->procId);
+ session->processedMsk |= (1 << effect->procId);
-// ALOGV("PreProcessingFx_Process In %d frames enabledMsk %08x processedMsk %08x",
-// inBuffer->frameCount, session->enabledMsk, session->processedMsk);
+ // ALOGV("PreProcessingFx_Process In %d frames enabledMsk %08x processedMsk %08x",
+ // inBuffer->frameCount, session->enabledMsk, session->processedMsk);
if ((session->processedMsk & session->enabledMsk) == session->enabledMsk) {
effect->session->processedMsk = 0;
@@ -1733,11 +1587,9 @@
if (outBuffer->frameCount < fr) {
fr = outBuffer->frameCount;
}
- memcpy(outBuffer->s16,
- session->outBuf,
- fr * session->outChannelCount * sizeof(int16_t));
- memmove(session->outBuf,
- session->outBuf + fr * session->outChannelCount,
+ memcpy(outBuffer->s16, session->outBuf,
+ fr * session->outChannelCount * sizeof(int16_t));
+ memmove(session->outBuf, session->outBuf + fr * session->outChannelCount,
(session->framesOut - fr) * session->outChannelCount * sizeof(int16_t));
session->framesOut -= fr;
framesWr += fr;
@@ -1755,10 +1607,11 @@
fr = inBuffer->frameCount;
}
if (session->inBufSize < session->framesIn + fr) {
- int16_t *buf;
+ int16_t* buf;
session->inBufSize = session->framesIn + fr;
- buf = (int16_t *)realloc(session->inBuf,
- session->inBufSize * session->inChannelCount * sizeof(int16_t));
+ buf = (int16_t*)realloc(
+ session->inBuf,
+ session->inBufSize * session->inChannelCount * sizeof(int16_t));
if (buf == NULL) {
session->framesIn = 0;
free(session->inBuf);
@@ -1767,14 +1620,13 @@
}
session->inBuf = buf;
}
- memcpy(session->inBuf + session->framesIn * session->inChannelCount,
- inBuffer->s16,
+ memcpy(session->inBuf + session->framesIn * session->inChannelCount, inBuffer->s16,
fr * session->inChannelCount * sizeof(int16_t));
#ifdef DUAL_MIC_TEST
pthread_mutex_lock(&gPcmDumpLock);
if (gPcmDumpFh != NULL) {
- fwrite(inBuffer->raw,
- fr * session->inChannelCount * sizeof(int16_t), 1, gPcmDumpFh);
+ fwrite(inBuffer->raw, fr * session->inChannelCount * sizeof(int16_t), 1,
+ gPcmDumpFh);
}
pthread_mutex_unlock(&gPcmDumpLock);
#endif
@@ -1787,21 +1639,13 @@
spx_uint32_t frIn = session->framesIn;
spx_uint32_t frOut = session->apmFrameCount;
if (session->inChannelCount == 1) {
- speex_resampler_process_int(session->inResampler,
- 0,
- session->inBuf,
- &frIn,
- session->procFrame->data_,
- &frOut);
+ speex_resampler_process_int(session->inResampler, 0, session->inBuf, &frIn,
+ session->procFrame->data_, &frOut);
} else {
- speex_resampler_process_interleaved_int(session->inResampler,
- session->inBuf,
- &frIn,
- session->procFrame->data_,
- &frOut);
+ speex_resampler_process_interleaved_int(session->inResampler, session->inBuf, &frIn,
+ session->procFrame->data_, &frOut);
}
- memmove(session->inBuf,
- session->inBuf + frIn * session->inChannelCount,
+ memmove(session->inBuf, session->inBuf + frIn * session->inChannelCount,
(session->framesIn - frIn) * session->inChannelCount * sizeof(int16_t));
session->framesIn -= frIn;
} else {
@@ -1810,14 +1654,13 @@
fr = inBuffer->frameCount;
}
memcpy(session->procFrame->data_ + session->framesIn * session->inChannelCount,
- inBuffer->s16,
- fr * session->inChannelCount * sizeof(int16_t));
+ inBuffer->s16, fr * session->inChannelCount * sizeof(int16_t));
#ifdef DUAL_MIC_TEST
pthread_mutex_lock(&gPcmDumpLock);
if (gPcmDumpFh != NULL) {
- fwrite(inBuffer->raw,
- fr * session->inChannelCount * sizeof(int16_t), 1, gPcmDumpFh);
+ fwrite(inBuffer->raw, fr * session->inChannelCount * sizeof(int16_t), 1,
+ gPcmDumpFh);
}
pthread_mutex_unlock(&gPcmDumpLock);
#endif
@@ -1844,11 +1687,11 @@
}
session->framesIn = 0;
if (int status = effect->session->apm->ProcessStream(
- (const int16_t* const)inBuffer->s16,
- (const webrtc::StreamConfig)effect->session->inputConfig,
- (const webrtc::StreamConfig)effect->session->outputConfig,
- (int16_t* const)outBuffer->s16);
- status != 0) {
+ (const int16_t* const)inBuffer->s16,
+ (const webrtc::StreamConfig)effect->session->inputConfig,
+ (const webrtc::StreamConfig)effect->session->outputConfig,
+ (int16_t* const)outBuffer->s16);
+ status != 0) {
ALOGE("Process Stream failed with error %d\n", status);
return status;
}
@@ -1856,10 +1699,11 @@
#endif
if (session->outBufSize < session->framesOut + session->frameCount) {
- int16_t *buf;
+ int16_t* buf;
session->outBufSize = session->framesOut + session->frameCount;
- buf = (int16_t *)realloc(session->outBuf,
- session->outBufSize * session->outChannelCount * sizeof(int16_t));
+ buf = (int16_t*)realloc(
+ session->outBuf,
+ session->outBufSize * session->outChannelCount * sizeof(int16_t));
if (buf == NULL) {
session->framesOut = 0;
free(session->outBuf);
@@ -1874,18 +1718,13 @@
spx_uint32_t frIn = session->apmFrameCount;
spx_uint32_t frOut = session->frameCount;
if (session->inChannelCount == 1) {
- speex_resampler_process_int(session->outResampler,
- 0,
- session->procFrame->data_,
- &frIn,
- session->outBuf + session->framesOut * session->outChannelCount,
- &frOut);
+ speex_resampler_process_int(
+ session->outResampler, 0, session->procFrame->data_, &frIn,
+ session->outBuf + session->framesOut * session->outChannelCount, &frOut);
} else {
- speex_resampler_process_interleaved_int(session->outResampler,
- session->procFrame->data_,
- &frIn,
- session->outBuf + session->framesOut * session->outChannelCount,
- &frOut);
+ speex_resampler_process_interleaved_int(
+ session->outResampler, session->procFrame->data_, &frIn,
+ session->outBuf + session->framesOut * session->outChannelCount, &frOut);
}
session->framesOut += frOut;
} else {
@@ -1901,11 +1740,9 @@
if (framesRq - framesWr < fr) {
fr = framesRq - framesWr;
}
- memcpy(outBuffer->s16 + framesWr * session->outChannelCount,
- session->outBuf,
- fr * session->outChannelCount * sizeof(int16_t));
- memmove(session->outBuf,
- session->outBuf + fr * session->outChannelCount,
+ memcpy(outBuffer->s16 + framesWr * session->outChannelCount, session->outBuf,
+ fr * session->outChannelCount * sizeof(int16_t));
+ memmove(session->outBuf, session->outBuf + fr * session->outChannelCount,
(session->framesOut - fr) * session->outChannelCount * sizeof(int16_t));
session->framesOut -= fr;
outBuffer->frameCount += fr;
@@ -1916,39 +1753,32 @@
}
}
-int PreProcessingFx_Command(effect_handle_t self,
- uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t *replySize,
- void *pReplyData)
-{
- preproc_effect_t * effect = (preproc_effect_t *) self;
+int PreProcessingFx_Command(effect_handle_t self, uint32_t cmdCode, uint32_t cmdSize,
+ void* pCmdData, uint32_t* replySize, void* pReplyData) {
+ preproc_effect_t* effect = (preproc_effect_t*)self;
- if (effect == NULL){
+ if (effect == NULL) {
return -EINVAL;
}
- //ALOGV("PreProcessingFx_Command: command %d cmdSize %d",cmdCode, cmdSize);
+ // ALOGV("PreProcessingFx_Command: command %d cmdSize %d",cmdCode, cmdSize);
- switch (cmdCode){
+ switch (cmdCode) {
case EFFECT_CMD_INIT:
- if (pReplyData == NULL || *replySize != sizeof(int)){
+ if (pReplyData == NULL || *replySize != sizeof(int)) {
return -EINVAL;
}
if (effect->ops->init) {
effect->ops->init(effect);
}
- *(int *)pReplyData = 0;
+ *(int*)pReplyData = 0;
break;
case EFFECT_CMD_SET_CONFIG: {
- if (pCmdData == NULL||
- cmdSize != sizeof(effect_config_t)||
- pReplyData == NULL||
- *replySize != sizeof(int)){
+ if (pCmdData == NULL || cmdSize != sizeof(effect_config_t) || pReplyData == NULL ||
+ *replySize != sizeof(int)) {
ALOGV("PreProcessingFx_Command cmdCode Case: "
- "EFFECT_CMD_SET_CONFIG: ERROR");
+ "EFFECT_CMD_SET_CONFIG: ERROR");
return -EINVAL;
}
#ifdef DUAL_MIC_TEST
@@ -1959,55 +1789,51 @@
effect->session->enabledMsk = 0;
}
#endif
- *(int *)pReplyData = Session_SetConfig(effect->session, (effect_config_t *)pCmdData);
+ *(int*)pReplyData = Session_SetConfig(effect->session, (effect_config_t*)pCmdData);
#ifdef DUAL_MIC_TEST
if (gDualMicEnabled) {
effect->session->enabledMsk = enabledMsk;
}
#endif
- if (*(int *)pReplyData != 0) {
+ if (*(int*)pReplyData != 0) {
break;
}
if (effect->state != PREPROC_EFFECT_STATE_ACTIVE) {
- *(int *)pReplyData = Effect_SetState(effect, PREPROC_EFFECT_STATE_CONFIG);
+ *(int*)pReplyData = Effect_SetState(effect, PREPROC_EFFECT_STATE_CONFIG);
}
- } break;
+ } break;
case EFFECT_CMD_GET_CONFIG:
- if (pReplyData == NULL ||
- *replySize != sizeof(effect_config_t)) {
+ if (pReplyData == NULL || *replySize != sizeof(effect_config_t)) {
ALOGV("\tLVM_ERROR : PreProcessingFx_Command cmdCode Case: "
- "EFFECT_CMD_GET_CONFIG: ERROR");
+ "EFFECT_CMD_GET_CONFIG: ERROR");
return -EINVAL;
}
- Session_GetConfig(effect->session, (effect_config_t *)pReplyData);
+ Session_GetConfig(effect->session, (effect_config_t*)pReplyData);
break;
case EFFECT_CMD_SET_CONFIG_REVERSE:
- if (pCmdData == NULL ||
- cmdSize != sizeof(effect_config_t) ||
- pReplyData == NULL ||
+ if (pCmdData == NULL || cmdSize != sizeof(effect_config_t) || pReplyData == NULL ||
*replySize != sizeof(int)) {
ALOGV("PreProcessingFx_Command cmdCode Case: "
- "EFFECT_CMD_SET_CONFIG_REVERSE: ERROR");
+ "EFFECT_CMD_SET_CONFIG_REVERSE: ERROR");
return -EINVAL;
}
- *(int *)pReplyData = Session_SetReverseConfig(effect->session,
- (effect_config_t *)pCmdData);
- if (*(int *)pReplyData != 0) {
+ *(int*)pReplyData =
+ Session_SetReverseConfig(effect->session, (effect_config_t*)pCmdData);
+ if (*(int*)pReplyData != 0) {
break;
}
break;
case EFFECT_CMD_GET_CONFIG_REVERSE:
- if (pReplyData == NULL ||
- *replySize != sizeof(effect_config_t)){
+ if (pReplyData == NULL || *replySize != sizeof(effect_config_t)) {
ALOGV("PreProcessingFx_Command cmdCode Case: "
- "EFFECT_CMD_GET_CONFIG_REVERSE: ERROR");
+ "EFFECT_CMD_GET_CONFIG_REVERSE: ERROR");
return -EINVAL;
}
- Session_GetReverseConfig(effect->session, (effect_config_t *)pCmdData);
+ Session_GetReverseConfig(effect->session, (effect_config_t*)pCmdData);
break;
case EFFECT_CMD_RESET:
@@ -2017,80 +1843,74 @@
break;
case EFFECT_CMD_GET_PARAM: {
- effect_param_t *p = (effect_param_t *)pCmdData;
+ effect_param_t* p = (effect_param_t*)pCmdData;
if (pCmdData == NULL || cmdSize < sizeof(effect_param_t) ||
- cmdSize < (sizeof(effect_param_t) + p->psize) ||
- pReplyData == NULL || replySize == NULL ||
- *replySize < (sizeof(effect_param_t) + p->psize)){
+ cmdSize < (sizeof(effect_param_t) + p->psize) || pReplyData == NULL ||
+ replySize == NULL || *replySize < (sizeof(effect_param_t) + p->psize)) {
ALOGV("PreProcessingFx_Command cmdCode Case: "
- "EFFECT_CMD_GET_PARAM: ERROR");
+ "EFFECT_CMD_GET_PARAM: ERROR");
return -EINVAL;
}
memcpy(pReplyData, pCmdData, sizeof(effect_param_t) + p->psize);
- p = (effect_param_t *)pReplyData;
+ p = (effect_param_t*)pReplyData;
int voffset = ((p->psize - 1) / sizeof(int32_t) + 1) * sizeof(int32_t);
if (effect->ops->get_parameter) {
- p->status = effect->ops->get_parameter(effect, p->data,
- &p->vsize,
- p->data + voffset);
+ p->status =
+ effect->ops->get_parameter(effect, p->data, &p->vsize, p->data + voffset);
*replySize = sizeof(effect_param_t) + voffset + p->vsize;
}
} break;
- case EFFECT_CMD_SET_PARAM:{
- if (pCmdData == NULL||
- cmdSize < sizeof(effect_param_t) ||
- pReplyData == NULL || replySize == NULL ||
- *replySize != sizeof(int32_t)){
+ case EFFECT_CMD_SET_PARAM: {
+ if (pCmdData == NULL || cmdSize < sizeof(effect_param_t) || pReplyData == NULL ||
+ replySize == NULL || *replySize != sizeof(int32_t)) {
ALOGV("PreProcessingFx_Command cmdCode Case: "
- "EFFECT_CMD_SET_PARAM: ERROR");
+ "EFFECT_CMD_SET_PARAM: ERROR");
return -EINVAL;
}
- effect_param_t *p = (effect_param_t *) pCmdData;
+ effect_param_t* p = (effect_param_t*)pCmdData;
- if (p->psize != sizeof(int32_t)){
+ if (p->psize != sizeof(int32_t)) {
ALOGV("PreProcessingFx_Command cmdCode Case: "
- "EFFECT_CMD_SET_PARAM: ERROR, psize is not sizeof(int32_t)");
+ "EFFECT_CMD_SET_PARAM: ERROR, psize is not sizeof(int32_t)");
return -EINVAL;
}
if (effect->ops->set_parameter) {
- *(int *)pReplyData = effect->ops->set_parameter(effect,
- (void *)p->data,
- p->data + p->psize);
+ *(int*)pReplyData =
+ effect->ops->set_parameter(effect, (void*)p->data, p->data + p->psize);
}
} break;
case EFFECT_CMD_ENABLE:
- if (pReplyData == NULL || replySize == NULL || *replySize != sizeof(int)){
+ if (pReplyData == NULL || replySize == NULL || *replySize != sizeof(int)) {
ALOGV("PreProcessingFx_Command cmdCode Case: EFFECT_CMD_ENABLE: ERROR");
return -EINVAL;
}
- *(int *)pReplyData = Effect_SetState(effect, PREPROC_EFFECT_STATE_ACTIVE);
+ *(int*)pReplyData = Effect_SetState(effect, PREPROC_EFFECT_STATE_ACTIVE);
break;
case EFFECT_CMD_DISABLE:
- if (pReplyData == NULL || replySize == NULL || *replySize != sizeof(int)){
+ if (pReplyData == NULL || replySize == NULL || *replySize != sizeof(int)) {
ALOGV("PreProcessingFx_Command cmdCode Case: EFFECT_CMD_DISABLE: ERROR");
return -EINVAL;
}
- *(int *)pReplyData = Effect_SetState(effect, PREPROC_EFFECT_STATE_CONFIG);
+ *(int*)pReplyData = Effect_SetState(effect, PREPROC_EFFECT_STATE_CONFIG);
break;
case EFFECT_CMD_SET_DEVICE:
case EFFECT_CMD_SET_INPUT_DEVICE:
- if (pCmdData == NULL ||
- cmdSize != sizeof(uint32_t)) {
+ if (pCmdData == NULL || cmdSize != sizeof(uint32_t)) {
ALOGV("PreProcessingFx_Command cmdCode Case: EFFECT_CMD_SET_DEVICE: ERROR");
return -EINVAL;
}
if (effect->ops->set_device) {
- effect->ops->set_device(effect, *(uint32_t *)pCmdData);
+ effect->ops->set_device(effect, *(uint32_t*)pCmdData);
}
break;
@@ -2101,30 +1921,30 @@
#ifdef DUAL_MIC_TEST
///// test commands start
case PREPROC_CMD_DUAL_MIC_ENABLE: {
- if (pCmdData == NULL|| cmdSize != sizeof(uint32_t) ||
- pReplyData == NULL || replySize == NULL) {
+ if (pCmdData == NULL || cmdSize != sizeof(uint32_t) || pReplyData == NULL ||
+ replySize == NULL) {
ALOGE("PreProcessingFx_Command cmdCode Case: "
- "PREPROC_CMD_DUAL_MIC_ENABLE: ERROR");
+ "PREPROC_CMD_DUAL_MIC_ENABLE: ERROR");
*replySize = 0;
return -EINVAL;
}
- gDualMicEnabled = *(bool *)pCmdData;
+ gDualMicEnabled = *(bool*)pCmdData;
if (gDualMicEnabled) {
effect->aux_channels_on = sHasAuxChannels[effect->procId];
} else {
effect->aux_channels_on = false;
}
- effect->cur_channel_config = (effect->session->inChannelCount == 1) ?
- CHANNEL_CFG_MONO : CHANNEL_CFG_STEREO;
+ effect->cur_channel_config =
+ (effect->session->inChannelCount == 1) ? CHANNEL_CFG_MONO : CHANNEL_CFG_STEREO;
ALOGV("PREPROC_CMD_DUAL_MIC_ENABLE: %s", gDualMicEnabled ? "enabled" : "disabled");
*replySize = sizeof(int);
- *(int *)pReplyData = 0;
- } break;
+ *(int*)pReplyData = 0;
+ } break;
case PREPROC_CMD_DUAL_MIC_PCM_DUMP_START: {
- if (pCmdData == NULL|| pReplyData == NULL || replySize == NULL) {
+ if (pCmdData == NULL || pReplyData == NULL || replySize == NULL) {
ALOGE("PreProcessingFx_Command cmdCode Case: "
- "PREPROC_CMD_DUAL_MIC_PCM_DUMP_START: ERROR");
+ "PREPROC_CMD_DUAL_MIC_PCM_DUMP_START: ERROR");
*replySize = 0;
return -EINVAL;
}
@@ -2133,20 +1953,19 @@
fclose(gPcmDumpFh);
gPcmDumpFh = NULL;
}
- char *path = strndup((char *)pCmdData, cmdSize);
- gPcmDumpFh = fopen((char *)path, "wb");
+ char* path = strndup((char*)pCmdData, cmdSize);
+ gPcmDumpFh = fopen((char*)path, "wb");
pthread_mutex_unlock(&gPcmDumpLock);
- ALOGV("PREPROC_CMD_DUAL_MIC_PCM_DUMP_START: path %s gPcmDumpFh %p",
- path, gPcmDumpFh);
+ ALOGV("PREPROC_CMD_DUAL_MIC_PCM_DUMP_START: path %s gPcmDumpFh %p", path, gPcmDumpFh);
ALOGE_IF(gPcmDumpFh <= 0, "gPcmDumpFh open error %d %s", errno, strerror(errno));
free(path);
*replySize = sizeof(int);
- *(int *)pReplyData = 0;
- } break;
+ *(int*)pReplyData = 0;
+ } break;
case PREPROC_CMD_DUAL_MIC_PCM_DUMP_STOP: {
if (pReplyData == NULL || replySize == NULL) {
ALOGE("PreProcessingFx_Command cmdCode Case: "
- "PREPROC_CMD_DUAL_MIC_PCM_DUMP_STOP: ERROR");
+ "PREPROC_CMD_DUAL_MIC_PCM_DUMP_STOP: ERROR");
*replySize = 0;
return -EINVAL;
}
@@ -2158,118 +1977,116 @@
pthread_mutex_unlock(&gPcmDumpLock);
ALOGV("PREPROC_CMD_DUAL_MIC_PCM_DUMP_STOP");
*replySize = sizeof(int);
- *(int *)pReplyData = 0;
- } break;
- ///// test commands end
+ *(int*)pReplyData = 0;
+ } break;
+ ///// test commands end
case EFFECT_CMD_GET_FEATURE_SUPPORTED_CONFIGS: {
- if(!gDualMicEnabled) {
+ if (!gDualMicEnabled) {
return -EINVAL;
}
- if (pCmdData == NULL|| cmdSize != 2 * sizeof(uint32_t) ||
- pReplyData == NULL || replySize == NULL) {
+ if (pCmdData == NULL || cmdSize != 2 * sizeof(uint32_t) || pReplyData == NULL ||
+ replySize == NULL) {
ALOGE("PreProcessingFx_Command cmdCode Case: "
- "EFFECT_CMD_GET_FEATURE_SUPPORTED_CONFIGS: ERROR");
+ "EFFECT_CMD_GET_FEATURE_SUPPORTED_CONFIGS: ERROR");
*replySize = 0;
return -EINVAL;
}
- if (*(uint32_t *)pCmdData != EFFECT_FEATURE_AUX_CHANNELS ||
- !effect->aux_channels_on) {
+ if (*(uint32_t*)pCmdData != EFFECT_FEATURE_AUX_CHANNELS || !effect->aux_channels_on) {
ALOGV("PreProcessingFx_Command feature EFFECT_FEATURE_AUX_CHANNELS not supported by"
- " fx %d", effect->procId);
- *(uint32_t *)pReplyData = -ENOSYS;
+ " fx %d",
+ effect->procId);
+ *(uint32_t*)pReplyData = -ENOSYS;
*replySize = sizeof(uint32_t);
break;
}
- size_t num_configs = *((uint32_t *)pCmdData + 1);
- if (*replySize < (2 * sizeof(uint32_t) +
- num_configs * sizeof(channel_config_t))) {
+ size_t num_configs = *((uint32_t*)pCmdData + 1);
+ if (*replySize < (2 * sizeof(uint32_t) + num_configs * sizeof(channel_config_t))) {
*replySize = 0;
return -EINVAL;
}
- *((uint32_t *)pReplyData + 1) = CHANNEL_CFG_CNT;
+ *((uint32_t*)pReplyData + 1) = CHANNEL_CFG_CNT;
if (num_configs < CHANNEL_CFG_CNT ||
- *replySize < (2 * sizeof(uint32_t) +
- CHANNEL_CFG_CNT * sizeof(channel_config_t))) {
- *(uint32_t *)pReplyData = -ENOMEM;
+ *replySize < (2 * sizeof(uint32_t) + CHANNEL_CFG_CNT * sizeof(channel_config_t))) {
+ *(uint32_t*)pReplyData = -ENOMEM;
} else {
num_configs = CHANNEL_CFG_CNT;
- *(uint32_t *)pReplyData = 0;
+ *(uint32_t*)pReplyData = 0;
}
ALOGV("PreProcessingFx_Command EFFECT_CMD_GET_FEATURE_SUPPORTED_CONFIGS num config %d",
num_configs);
*replySize = 2 * sizeof(uint32_t) + num_configs * sizeof(channel_config_t);
- *((uint32_t *)pReplyData + 1) = num_configs;
- memcpy((uint32_t *)pReplyData + 2, &sDualMicConfigs, num_configs * sizeof(channel_config_t));
- } break;
+ *((uint32_t*)pReplyData + 1) = num_configs;
+ memcpy((uint32_t*)pReplyData + 2, &sDualMicConfigs,
+ num_configs * sizeof(channel_config_t));
+ } break;
case EFFECT_CMD_GET_FEATURE_CONFIG:
- if(!gDualMicEnabled) {
+ if (!gDualMicEnabled) {
return -EINVAL;
}
- if (pCmdData == NULL|| cmdSize != sizeof(uint32_t) ||
- pReplyData == NULL || replySize == NULL ||
- *replySize < sizeof(uint32_t) + sizeof(channel_config_t)) {
+ if (pCmdData == NULL || cmdSize != sizeof(uint32_t) || pReplyData == NULL ||
+ replySize == NULL || *replySize < sizeof(uint32_t) + sizeof(channel_config_t)) {
ALOGE("PreProcessingFx_Command cmdCode Case: "
- "EFFECT_CMD_GET_FEATURE_CONFIG: ERROR");
+ "EFFECT_CMD_GET_FEATURE_CONFIG: ERROR");
return -EINVAL;
}
- if (*(uint32_t *)pCmdData != EFFECT_FEATURE_AUX_CHANNELS || !effect->aux_channels_on) {
- *(uint32_t *)pReplyData = -ENOSYS;
+ if (*(uint32_t*)pCmdData != EFFECT_FEATURE_AUX_CHANNELS || !effect->aux_channels_on) {
+ *(uint32_t*)pReplyData = -ENOSYS;
*replySize = sizeof(uint32_t);
break;
}
ALOGV("PreProcessingFx_Command EFFECT_CMD_GET_FEATURE_CONFIG");
- *(uint32_t *)pReplyData = 0;
+ *(uint32_t*)pReplyData = 0;
*replySize = sizeof(uint32_t) + sizeof(channel_config_t);
- memcpy((uint32_t *)pReplyData + 1,
- &sDualMicConfigs[effect->cur_channel_config],
+ memcpy((uint32_t*)pReplyData + 1, &sDualMicConfigs[effect->cur_channel_config],
sizeof(channel_config_t));
break;
case EFFECT_CMD_SET_FEATURE_CONFIG: {
ALOGV("PreProcessingFx_Command EFFECT_CMD_SET_FEATURE_CONFIG: "
- "gDualMicEnabled %d effect->aux_channels_on %d",
+ "gDualMicEnabled %d effect->aux_channels_on %d",
gDualMicEnabled, effect->aux_channels_on);
- if(!gDualMicEnabled) {
+ if (!gDualMicEnabled) {
return -EINVAL;
}
- if (pCmdData == NULL|| cmdSize != (sizeof(uint32_t) + sizeof(channel_config_t)) ||
- pReplyData == NULL || replySize == NULL ||
- *replySize < sizeof(uint32_t)) {
+ if (pCmdData == NULL || cmdSize != (sizeof(uint32_t) + sizeof(channel_config_t)) ||
+ pReplyData == NULL || replySize == NULL || *replySize < sizeof(uint32_t)) {
ALOGE("PreProcessingFx_Command cmdCode Case: "
- "EFFECT_CMD_SET_FEATURE_CONFIG: ERROR\n"
- "pCmdData %p cmdSize %d pReplyData %p replySize %p *replySize %d",
- pCmdData, cmdSize, pReplyData, replySize, replySize ? *replySize : -1);
+ "EFFECT_CMD_SET_FEATURE_CONFIG: ERROR\n"
+ "pCmdData %p cmdSize %d pReplyData %p replySize %p *replySize %d",
+ pCmdData, cmdSize, pReplyData, replySize, replySize ? *replySize : -1);
return -EINVAL;
}
*replySize = sizeof(uint32_t);
- if (*(uint32_t *)pCmdData != EFFECT_FEATURE_AUX_CHANNELS || !effect->aux_channels_on) {
- *(uint32_t *)pReplyData = -ENOSYS;
+ if (*(uint32_t*)pCmdData != EFFECT_FEATURE_AUX_CHANNELS || !effect->aux_channels_on) {
+ *(uint32_t*)pReplyData = -ENOSYS;
ALOGV("PreProcessingFx_Command cmdCode Case: "
- "EFFECT_CMD_SET_FEATURE_CONFIG: ERROR\n"
- "CmdData %d effect->aux_channels_on %d",
- *(uint32_t *)pCmdData, effect->aux_channels_on);
+ "EFFECT_CMD_SET_FEATURE_CONFIG: ERROR\n"
+ "CmdData %d effect->aux_channels_on %d",
+ *(uint32_t*)pCmdData, effect->aux_channels_on);
break;
}
size_t i;
- for (i = 0; i < CHANNEL_CFG_CNT;i++) {
- if (memcmp((uint32_t *)pCmdData + 1,
- &sDualMicConfigs[i], sizeof(channel_config_t)) == 0) {
+ for (i = 0; i < CHANNEL_CFG_CNT; i++) {
+ if (memcmp((uint32_t*)pCmdData + 1, &sDualMicConfigs[i],
+ sizeof(channel_config_t)) == 0) {
break;
}
}
if (i == CHANNEL_CFG_CNT) {
- *(uint32_t *)pReplyData = -EINVAL;
+ *(uint32_t*)pReplyData = -EINVAL;
ALOGW("PreProcessingFx_Command EFFECT_CMD_SET_FEATURE_CONFIG invalid config"
- "[%08x].[%08x]", *((uint32_t *)pCmdData + 1), *((uint32_t *)pCmdData + 2));
+ "[%08x].[%08x]",
+ *((uint32_t*)pCmdData + 1), *((uint32_t*)pCmdData + 2));
} else {
effect->cur_channel_config = i;
- *(uint32_t *)pReplyData = 0;
+ *(uint32_t*)pReplyData = 0;
ALOGV("PreProcessingFx_Command EFFECT_CMD_SET_FEATURE_CONFIG New config"
- "[%08x].[%08x]", sDualMicConfigs[i].main_channels, sDualMicConfigs[i].aux_channels);
+ "[%08x].[%08x]",
+ sDualMicConfigs[i].main_channels, sDualMicConfigs[i].aux_channels);
}
- } break;
+ } break;
#endif
default:
return -EINVAL;
@@ -2277,11 +2094,8 @@
return 0;
}
-
-int PreProcessingFx_GetDescriptor(effect_handle_t self,
- effect_descriptor_t *pDescriptor)
-{
- preproc_effect_t * effect = (preproc_effect_t *) self;
+int PreProcessingFx_GetDescriptor(effect_handle_t self, effect_descriptor_t* pDescriptor) {
+ preproc_effect_t* effect = (preproc_effect_t*)self;
if (effect == NULL || pDescriptor == NULL) {
return -EINVAL;
@@ -2292,28 +2106,26 @@
return 0;
}
-int PreProcessingFx_ProcessReverse(effect_handle_t self,
- audio_buffer_t *inBuffer,
- audio_buffer_t *outBuffer __unused)
-{
- preproc_effect_t * effect = (preproc_effect_t *)self;
+int PreProcessingFx_ProcessReverse(effect_handle_t self, audio_buffer_t* inBuffer,
+ audio_buffer_t* outBuffer __unused) {
+ preproc_effect_t* effect = (preproc_effect_t*)self;
- if (effect == NULL){
+ if (effect == NULL) {
ALOGW("PreProcessingFx_ProcessReverse() ERROR effect == NULL");
return -EINVAL;
}
- preproc_session_t * session = (preproc_session_t *)effect->session;
+ preproc_session_t* session = (preproc_session_t*)effect->session;
- if (inBuffer == NULL || inBuffer->raw == NULL){
+ if (inBuffer == NULL || inBuffer->raw == NULL) {
ALOGW("PreProcessingFx_ProcessReverse() ERROR bad pointer");
return -EINVAL;
}
- session->revProcessedMsk |= (1<<effect->procId);
+ session->revProcessedMsk |= (1 << effect->procId);
-// ALOGV("PreProcessingFx_ProcessReverse In %d frames revEnabledMsk %08x revProcessedMsk %08x",
-// inBuffer->frameCount, session->revEnabledMsk, session->revProcessedMsk);
-
+ // ALOGV("PreProcessingFx_ProcessReverse In %d frames revEnabledMsk %08x revProcessedMsk
+ // %08x",
+ // inBuffer->frameCount, session->revEnabledMsk, session->revProcessedMsk);
if ((session->revProcessedMsk & session->revEnabledMsk) == session->revEnabledMsk) {
effect->session->revProcessedMsk = 0;
@@ -2324,10 +2136,11 @@
fr = inBuffer->frameCount;
}
if (session->revBufSize < session->framesRev + fr) {
- int16_t *buf;
+ int16_t* buf;
session->revBufSize = session->framesRev + fr;
- buf = (int16_t *)realloc(session->revBuf,
- session->revBufSize * session->inChannelCount * sizeof(int16_t));
+ buf = (int16_t*)realloc(
+ session->revBuf,
+ session->revBufSize * session->inChannelCount * sizeof(int16_t));
if (buf == NULL) {
session->framesRev = 0;
free(session->revBuf);
@@ -2336,8 +2149,7 @@
}
session->revBuf = buf;
}
- memcpy(session->revBuf + session->framesRev * session->inChannelCount,
- inBuffer->s16,
+ memcpy(session->revBuf + session->framesRev * session->inChannelCount, inBuffer->s16,
fr * session->inChannelCount * sizeof(int16_t));
session->framesRev += fr;
@@ -2348,21 +2160,13 @@
spx_uint32_t frIn = session->framesRev;
spx_uint32_t frOut = session->apmFrameCount;
if (session->inChannelCount == 1) {
- speex_resampler_process_int(session->revResampler,
- 0,
- session->revBuf,
- &frIn,
- session->revFrame->data_,
- &frOut);
+ speex_resampler_process_int(session->revResampler, 0, session->revBuf, &frIn,
+ session->revFrame->data_, &frOut);
} else {
- speex_resampler_process_interleaved_int(session->revResampler,
- session->revBuf,
- &frIn,
- session->revFrame->data_,
- &frOut);
+ speex_resampler_process_interleaved_int(session->revResampler, session->revBuf,
+ &frIn, session->revFrame->data_, &frOut);
}
- memmove(session->revBuf,
- session->revBuf + frIn * session->inChannelCount,
+ memmove(session->revBuf, session->revBuf + frIn * session->inChannelCount,
(session->framesRev - frIn) * session->inChannelCount * sizeof(int16_t));
session->framesRev -= frIn;
} else {
@@ -2371,8 +2175,7 @@
fr = inBuffer->frameCount;
}
memcpy(session->revFrame->data_ + session->framesRev * session->inChannelCount,
- inBuffer->s16,
- fr * session->inChannelCount * sizeof(int16_t));
+ inBuffer->s16, fr * session->inChannelCount * sizeof(int16_t));
session->framesRev += fr;
inBuffer->frameCount = fr;
if (session->framesRev < session->frameCount) {
@@ -2394,11 +2197,11 @@
}
session->framesRev = 0;
if (int status = effect->session->apm->ProcessReverseStream(
- (const int16_t* const)inBuffer->s16,
- (const webrtc::StreamConfig)effect->session->revConfig,
- (const webrtc::StreamConfig)effect->session->revConfig,
- (int16_t* const)outBuffer->s16);
- status != 0) {
+ (const int16_t* const)inBuffer->s16,
+ (const webrtc::StreamConfig)effect->session->revConfig,
+ (const webrtc::StreamConfig)effect->session->revConfig,
+ (int16_t* const)outBuffer->s16);
+ status != 0) {
ALOGE("Process Reverse Stream failed with error %d\n", status);
return status;
}
@@ -2409,42 +2212,31 @@
}
}
-
// effect_handle_t interface implementation for effect
const struct effect_interface_s sEffectInterface = {
- PreProcessingFx_Process,
- PreProcessingFx_Command,
- PreProcessingFx_GetDescriptor,
- NULL
-};
+ PreProcessingFx_Process, PreProcessingFx_Command, PreProcessingFx_GetDescriptor, NULL};
const struct effect_interface_s sEffectInterfaceReverse = {
- PreProcessingFx_Process,
- PreProcessingFx_Command,
- PreProcessingFx_GetDescriptor,
- PreProcessingFx_ProcessReverse
-};
+ PreProcessingFx_Process, PreProcessingFx_Command, PreProcessingFx_GetDescriptor,
+ PreProcessingFx_ProcessReverse};
//------------------------------------------------------------------------------
// Effect Library Interface Implementation
//------------------------------------------------------------------------------
-int PreProcessingLib_Create(const effect_uuid_t *uuid,
- int32_t sessionId,
- int32_t ioId,
- effect_handle_t *pInterface)
-{
+int PreProcessingLib_Create(const effect_uuid_t* uuid, int32_t sessionId, int32_t ioId,
+ effect_handle_t* pInterface) {
ALOGV("EffectCreate: uuid: %08x session %d IO: %d", uuid->timeLow, sessionId, ioId);
int status;
- const effect_descriptor_t *desc;
- preproc_session_t *session;
+ const effect_descriptor_t* desc;
+ preproc_session_t* session;
uint32_t procId;
if (PreProc_Init() != 0) {
return sInitStatus;
}
- desc = PreProc_GetDescriptor(uuid);
+ desc = PreProc_GetDescriptor(uuid);
if (desc == NULL) {
ALOGW("EffectCreate: fx not found uuid: %08x", uuid->timeLow);
return -EINVAL;
@@ -2465,14 +2257,13 @@
return status;
}
-int PreProcessingLib_Release(effect_handle_t interface)
-{
+int PreProcessingLib_Release(effect_handle_t interface) {
ALOGV("EffectRelease start %p", interface);
if (PreProc_Init() != 0) {
return sInitStatus;
}
- preproc_effect_t *fx = (preproc_effect_t *)interface;
+ preproc_effect_t* fx = (preproc_effect_t*)interface;
if (fx->session->id == 0) {
return -EINVAL;
@@ -2480,17 +2271,15 @@
return Session_ReleaseEffect(fx->session, fx);
}
-int PreProcessingLib_GetDescriptor(const effect_uuid_t *uuid,
- effect_descriptor_t *pDescriptor) {
-
- if (pDescriptor == NULL || uuid == NULL){
+int PreProcessingLib_GetDescriptor(const effect_uuid_t* uuid, effect_descriptor_t* pDescriptor) {
+ if (pDescriptor == NULL || uuid == NULL) {
return -EINVAL;
}
- const effect_descriptor_t *desc = PreProc_GetDescriptor(uuid);
+ const effect_descriptor_t* desc = PreProc_GetDescriptor(uuid);
if (desc == NULL) {
ALOGV("PreProcessingLib_GetDescriptor() not found");
- return -EINVAL;
+ return -EINVAL;
}
ALOGV("PreProcessingLib_GetDescriptor() got fx %s", desc->name);
@@ -2500,15 +2289,13 @@
}
// This is the only symbol that needs to be exported
-__attribute__ ((visibility ("default")))
-audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
- .tag = AUDIO_EFFECT_LIBRARY_TAG,
- .version = EFFECT_LIBRARY_API_VERSION,
- .name = "Audio Preprocessing Library",
- .implementor = "The Android Open Source Project",
- .create_effect = PreProcessingLib_Create,
- .release_effect = PreProcessingLib_Release,
- .get_descriptor = PreProcessingLib_GetDescriptor
-};
+__attribute__((visibility("default"))) audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
+ .tag = AUDIO_EFFECT_LIBRARY_TAG,
+ .version = EFFECT_LIBRARY_API_VERSION,
+ .name = "Audio Preprocessing Library",
+ .implementor = "The Android Open Source Project",
+ .create_effect = PreProcessingLib_Create,
+ .release_effect = PreProcessingLib_Release,
+ .get_descriptor = PreProcessingLib_GetDescriptor};
-}; // extern "C"
+}; // extern "C"
diff --git a/media/libeffects/preprocessing/benchmarks/preprocessing_benchmark.cpp b/media/libeffects/preprocessing/benchmarks/preprocessing_benchmark.cpp
index d4df371..3a0ad6d 100644
--- a/media/libeffects/preprocessing/benchmarks/preprocessing_benchmark.cpp
+++ b/media/libeffects/preprocessing/benchmarks/preprocessing_benchmark.cpp
@@ -47,13 +47,13 @@
* BM_PREPROCESSING/5/3 13254 ns 13212 ns 52972
*******************************************************************/
+#include <audio_effects/effect_aec.h>
+#include <audio_effects/effect_agc.h>
#include <array>
#include <climits>
#include <cstdlib>
#include <random>
#include <vector>
-#include <audio_effects/effect_aec.h>
-#include <audio_effects/effect_agc.h>
#ifndef WEBRTC_LEGACY
#include <audio_effects/effect_agc2.h>
#endif
@@ -70,190 +70,175 @@
constexpr float kTenMilliSecVal = 0.01;
constexpr unsigned int kStreamDelayMs = 0;
constexpr effect_uuid_t kEffectUuids[] = {
- // agc uuid
- {0xaa8130e0, 0x66fc, 0x11e0, 0xbad0, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
- // aec uuid
- {0xbb392ec0, 0x8d4d, 0x11e0, 0xa896, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
- // ns uuid
- {0xc06c8400, 0x8e06, 0x11e0, 0x9cb6, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+ // agc uuid
+ {0xaa8130e0, 0x66fc, 0x11e0, 0xbad0, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+ // aec uuid
+ {0xbb392ec0, 0x8d4d, 0x11e0, 0xa896, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+ // ns uuid
+ {0xc06c8400, 0x8e06, 0x11e0, 0x9cb6, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
#ifndef WEBRTC_LEGACY
- // agc2 uuid
- {0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, {0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86}},
+ // agc2 uuid
+ {0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, {0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86}},
#endif
};
constexpr size_t kNumEffectUuids = std::size(kEffectUuids);
constexpr audio_channel_mask_t kChMasks[] = {
- AUDIO_CHANNEL_IN_MONO,
- AUDIO_CHANNEL_IN_STEREO,
- AUDIO_CHANNEL_IN_2POINT0POINT2,
- AUDIO_CHANNEL_IN_2POINT1POINT2,
- AUDIO_CHANNEL_IN_6,
+ AUDIO_CHANNEL_IN_MONO, AUDIO_CHANNEL_IN_STEREO, AUDIO_CHANNEL_IN_2POINT0POINT2,
+ AUDIO_CHANNEL_IN_2POINT1POINT2, AUDIO_CHANNEL_IN_6,
};
constexpr size_t kNumChMasks = std::size(kChMasks);
// types of pre processing modules
enum PreProcId {
- PREPROC_AGC, // Automatic Gain Control
- PREPROC_AEC, // Acoustic Echo Canceler
- PREPROC_NS, // Noise Suppressor
+ PREPROC_AGC, // Automatic Gain Control
+ PREPROC_AEC, // Acoustic Echo Canceler
+ PREPROC_NS, // Noise Suppressor
#ifndef WEBRTC_LEGACY
- PREPROC_AGC2, // Automatic Gain Control 2
+ PREPROC_AGC2, // Automatic Gain Control 2
#endif
- PREPROC_NUM_EFFECTS
+ PREPROC_NUM_EFFECTS
};
-int preProcCreateEffect(effect_handle_t *pEffectHandle, uint32_t effectType,
- effect_config_t *pConfig, int sessionId, int ioId) {
- if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.create_effect(
- &kEffectUuids[effectType], sessionId, ioId, pEffectHandle);
- status != 0) {
- ALOGE("Audio Preprocessing create returned an error = %d\n", status);
- return EXIT_FAILURE;
- }
- int reply = 0;
- uint32_t replySize = sizeof(reply);
- if (effectType == PREPROC_AEC) {
- if (int status =
- (**pEffectHandle)
- ->command(*pEffectHandle, EFFECT_CMD_SET_CONFIG_REVERSE,
- sizeof(effect_config_t), pConfig, &replySize, &reply);
+int preProcCreateEffect(effect_handle_t* pEffectHandle, uint32_t effectType,
+ effect_config_t* pConfig, int sessionId, int ioId) {
+ if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.create_effect(&kEffectUuids[effectType],
+ sessionId, ioId, pEffectHandle);
status != 0) {
- ALOGE("Set config reverse command returned an error = %d\n", status);
- return EXIT_FAILURE;
+ ALOGE("Audio Preprocessing create returned an error = %d\n", status);
+ return EXIT_FAILURE;
}
- }
- if (int status =
- (**pEffectHandle)
- ->command(*pEffectHandle, EFFECT_CMD_SET_CONFIG,
- sizeof(effect_config_t), pConfig, &replySize, &reply);
- status != 0) {
- ALOGE("Set config command returned an error = %d\n", status);
- return EXIT_FAILURE;
- }
- return reply;
+ int reply = 0;
+ uint32_t replySize = sizeof(reply);
+ if (effectType == PREPROC_AEC) {
+ if (int status = (**pEffectHandle)
+ ->command(*pEffectHandle, EFFECT_CMD_SET_CONFIG_REVERSE,
+ sizeof(effect_config_t), pConfig, &replySize, &reply);
+ status != 0) {
+ ALOGE("Set config reverse command returned an error = %d\n", status);
+ return EXIT_FAILURE;
+ }
+ }
+ if (int status = (**pEffectHandle)
+ ->command(*pEffectHandle, EFFECT_CMD_SET_CONFIG,
+ sizeof(effect_config_t), pConfig, &replySize, &reply);
+ status != 0) {
+ ALOGE("Set config command returned an error = %d\n", status);
+ return EXIT_FAILURE;
+ }
+ return reply;
}
-int preProcSetConfigParam(effect_handle_t effectHandle, uint32_t paramType,
- uint32_t paramValue) {
- int reply = 0;
- uint32_t replySize = sizeof(reply);
- uint32_t paramData[2] = {paramType, paramValue};
- effect_param_t *effectParam =
- (effect_param_t *)malloc(sizeof(*effectParam) + sizeof(paramData));
- memcpy(&effectParam->data[0], ¶mData[0], sizeof(paramData));
- effectParam->psize = sizeof(paramData[0]);
- (*effectHandle)
- ->command(effectHandle, EFFECT_CMD_SET_PARAM, sizeof(effect_param_t),
- effectParam, &replySize, &reply);
- free(effectParam);
- return reply;
+int preProcSetConfigParam(effect_handle_t effectHandle, uint32_t paramType, uint32_t paramValue) {
+ int reply = 0;
+ uint32_t replySize = sizeof(reply);
+ uint32_t paramData[2] = {paramType, paramValue};
+ effect_param_t* effectParam = (effect_param_t*)malloc(sizeof(*effectParam) + sizeof(paramData));
+ memcpy(&effectParam->data[0], ¶mData[0], sizeof(paramData));
+ effectParam->psize = sizeof(paramData[0]);
+ (*effectHandle)
+ ->command(effectHandle, EFFECT_CMD_SET_PARAM, sizeof(effect_param_t), effectParam,
+ &replySize, &reply);
+ free(effectParam);
+ return reply;
}
short preProcGetShortVal(float paramValue) {
- return static_cast<short>(paramValue * std::numeric_limits<short>::max());
+ return static_cast<short>(paramValue * std::numeric_limits<short>::max());
}
-static void BM_PREPROCESSING(benchmark::State &state) {
- const size_t chMask = kChMasks[state.range(0) - 1];
- const size_t channelCount = audio_channel_count_from_in_mask(chMask);
+static void BM_PREPROCESSING(benchmark::State& state) {
+ const size_t chMask = kChMasks[state.range(0) - 1];
+ const size_t channelCount = audio_channel_count_from_in_mask(chMask);
- PreProcId effectType = (PreProcId)state.range(1);
+ PreProcId effectType = (PreProcId)state.range(1);
- int32_t sessionId = 1;
- int32_t ioId = 1;
- effect_handle_t effectHandle = nullptr;
- effect_config_t config{};
- config.inputCfg.samplingRate = config.outputCfg.samplingRate = kSampleRate;
- config.inputCfg.channels = config.outputCfg.channels = chMask;
- config.inputCfg.format = config.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+ int32_t sessionId = 1;
+ int32_t ioId = 1;
+ effect_handle_t effectHandle = nullptr;
+ effect_config_t config{};
+ config.inputCfg.samplingRate = config.outputCfg.samplingRate = kSampleRate;
+ config.inputCfg.channels = config.outputCfg.channels = chMask;
+ config.inputCfg.format = config.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
- if (int status = preProcCreateEffect(&effectHandle, state.range(1), &config,
- sessionId, ioId);
- status != 0) {
- ALOGE("Create effect call returned error %i", status);
- return;
- }
-
- int reply = 0;
- uint32_t replySize = sizeof(reply);
- if (int status = (*effectHandle)
- ->command(effectHandle, EFFECT_CMD_ENABLE, 0, nullptr,
- &replySize, &reply);
- status != 0) {
- ALOGE("Command enable call returned error %d\n", reply);
- return;
- }
-
- // Initialize input buffer with deterministic pseudo-random values
- const int frameLength = (int)(kSampleRate * kTenMilliSecVal);
- std::minstd_rand gen(chMask);
- std::uniform_real_distribution<> dis(-1.0f, 1.0f);
- std::vector<short> in(frameLength * channelCount);
- for (auto &i : in) {
- i = preProcGetShortVal(dis(gen));
- }
- std::vector<short> farIn(frameLength * channelCount);
- for (auto &i : farIn) {
- i = preProcGetShortVal(dis(gen));
- }
- std::vector<short> out(frameLength * channelCount);
-
- // Run the test
- for (auto _ : state) {
- benchmark::DoNotOptimize(in.data());
- benchmark::DoNotOptimize(out.data());
- benchmark::DoNotOptimize(farIn.data());
-
- audio_buffer_t inBuffer = {.frameCount = (size_t)frameLength,
- .s16 = in.data()};
- audio_buffer_t outBuffer = {.frameCount = (size_t)frameLength,
- .s16 = out.data()};
- audio_buffer_t farInBuffer = {.frameCount = (size_t)frameLength,
- .s16 = farIn.data()};
-
- if (PREPROC_AEC == effectType) {
- if (int status = preProcSetConfigParam(effectHandle, AEC_PARAM_ECHO_DELAY,
- kStreamDelayMs);
- status != 0) {
- ALOGE("preProcSetConfigParam returned Error %d\n", status);
- return;
- }
- }
- if (int status =
- (*effectHandle)->process(effectHandle, &inBuffer, &outBuffer);
+ if (int status = preProcCreateEffect(&effectHandle, state.range(1), &config, sessionId, ioId);
status != 0) {
- ALOGE("\nError: Process i = %d returned with error %d\n",
- (int)state.range(1), status);
- return;
- }
- if (PREPROC_AEC == effectType) {
- if (int status =
- (*effectHandle)
- ->process_reverse(effectHandle, &farInBuffer, &outBuffer);
- status != 0) {
- ALOGE("\nError: Process reverse i = %d returned with error %d\n",
- (int)state.range(1), status);
+ ALOGE("Create effect call returned error %i", status);
return;
- }
}
- }
- benchmark::ClobberMemory();
- state.SetComplexityN(state.range(0));
+ int reply = 0;
+ uint32_t replySize = sizeof(reply);
+ if (int status =
+ (*effectHandle)
+ ->command(effectHandle, EFFECT_CMD_ENABLE, 0, nullptr, &replySize, &reply);
+ status != 0) {
+ ALOGE("Command enable call returned error %d\n", reply);
+ return;
+ }
- if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(effectHandle);
- status != 0) {
- ALOGE("release_effect returned an error = %d\n", status);
- return;
- }
+ // Initialize input buffer with deterministic pseudo-random values
+ const int frameLength = (int)(kSampleRate * kTenMilliSecVal);
+ std::minstd_rand gen(chMask);
+ std::uniform_real_distribution<> dis(-1.0f, 1.0f);
+ std::vector<short> in(frameLength * channelCount);
+ for (auto& i : in) {
+ i = preProcGetShortVal(dis(gen));
+ }
+ std::vector<short> farIn(frameLength * channelCount);
+ for (auto& i : farIn) {
+ i = preProcGetShortVal(dis(gen));
+ }
+ std::vector<short> out(frameLength * channelCount);
+
+ // Run the test
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(in.data());
+ benchmark::DoNotOptimize(out.data());
+ benchmark::DoNotOptimize(farIn.data());
+
+ audio_buffer_t inBuffer = {.frameCount = (size_t)frameLength, .s16 = in.data()};
+ audio_buffer_t outBuffer = {.frameCount = (size_t)frameLength, .s16 = out.data()};
+ audio_buffer_t farInBuffer = {.frameCount = (size_t)frameLength, .s16 = farIn.data()};
+
+ if (PREPROC_AEC == effectType) {
+ if (int status =
+ preProcSetConfigParam(effectHandle, AEC_PARAM_ECHO_DELAY, kStreamDelayMs);
+ status != 0) {
+ ALOGE("preProcSetConfigParam returned Error %d\n", status);
+ return;
+ }
+ }
+ if (int status = (*effectHandle)->process(effectHandle, &inBuffer, &outBuffer);
+ status != 0) {
+ ALOGE("\nError: Process i = %d returned with error %d\n", (int)state.range(1), status);
+ return;
+ }
+ if (PREPROC_AEC == effectType) {
+ if (int status =
+ (*effectHandle)->process_reverse(effectHandle, &farInBuffer, &outBuffer);
+ status != 0) {
+ ALOGE("\nError: Process reverse i = %d returned with error %d\n",
+ (int)state.range(1), status);
+ return;
+ }
+ }
+ }
+ benchmark::ClobberMemory();
+
+ state.SetComplexityN(state.range(0));
+
+ if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(effectHandle); status != 0) {
+ ALOGE("release_effect returned an error = %d\n", status);
+ return;
+ }
}
-static void preprocessingArgs(benchmark::internal::Benchmark *b) {
- for (int i = 1; i <= (int)kNumChMasks; i++) {
- for (int j = 0; j < (int)kNumEffectUuids; ++j) {
- b->Args({i, j});
+static void preprocessingArgs(benchmark::internal::Benchmark* b) {
+ for (int i = 1; i <= (int)kNumChMasks; i++) {
+ for (int j = 0; j < (int)kNumEffectUuids; ++j) {
+ b->Args({i, j});
+ }
}
- }
}
BENCHMARK(BM_PREPROCESSING)->Apply(preprocessingArgs);
diff --git a/media/libeffects/preprocessing/tests/PreProcessingTest.cpp b/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
index 3e8ea76..65b9469 100644
--- a/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
+++ b/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
@@ -37,464 +37,465 @@
// types of pre processing modules
enum PreProcId {
- PREPROC_AGC, // Automatic Gain Control
+ PREPROC_AGC, // Automatic Gain Control
#ifndef WEBRTC_LEGACY
- PREPROC_AGC2, // Automatic Gain Control 2
+ PREPROC_AGC2, // Automatic Gain Control 2
#endif
- PREPROC_AEC, // Acoustic Echo Canceler
- PREPROC_NS, // Noise Suppressor
- PREPROC_NUM_EFFECTS
+ PREPROC_AEC, // Acoustic Echo Canceler
+ PREPROC_NS, // Noise Suppressor
+ PREPROC_NUM_EFFECTS
};
enum PreProcParams {
- ARG_HELP = 1,
- ARG_INPUT,
- ARG_OUTPUT,
- ARG_FAR,
- ARG_FS,
- ARG_CH_MASK,
- ARG_AGC_TGT_LVL,
- ARG_AGC_COMP_LVL,
- ARG_AEC_DELAY,
- ARG_NS_LVL,
+ ARG_HELP = 1,
+ ARG_INPUT,
+ ARG_OUTPUT,
+ ARG_FAR,
+ ARG_FS,
+ ARG_CH_MASK,
+ ARG_AGC_TGT_LVL,
+ ARG_AGC_COMP_LVL,
+ ARG_AEC_DELAY,
+ ARG_NS_LVL,
#ifndef WEBRTC_LEGACY
- ARG_AGC2_GAIN,
- ARG_AGC2_LVL,
- ARG_AGC2_SAT_MGN
+ ARG_AGC2_GAIN,
+ ARG_AGC2_LVL,
+ ARG_AGC2_SAT_MGN
#endif
};
struct preProcConfigParams_t {
- int samplingFreq = 16000;
- audio_channel_mask_t chMask = AUDIO_CHANNEL_IN_MONO;
- int nsLevel = 0; // a value between 0-3
- int agcTargetLevel = 3; // in dB
- int agcCompLevel = 9; // in dB
+ int samplingFreq = 16000;
+ audio_channel_mask_t chMask = AUDIO_CHANNEL_IN_MONO;
+ int nsLevel = 0; // a value between 0-3
+ int agcTargetLevel = 3; // in dB
+ int agcCompLevel = 9; // in dB
#ifndef WEBRTC_LEGACY
- float agc2Gain = 0.f; // in dB
- float agc2SaturationMargin = 2.f; // in dB
- int agc2Level = 0; // either kRms(0) or kPeak(1)
+ float agc2Gain = 0.f; // in dB
+ float agc2SaturationMargin = 2.f; // in dB
+ int agc2Level = 0; // either kRms(0) or kPeak(1)
#endif
- int aecDelay = 0; // in ms
+ int aecDelay = 0; // in ms
};
const effect_uuid_t kPreProcUuids[PREPROC_NUM_EFFECTS] = {
- {0xaa8130e0, 0x66fc, 0x11e0, 0xbad0, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}, // agc uuid
+ {0xaa8130e0, 0x66fc, 0x11e0, 0xbad0, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}, // agc uuid
#ifndef WEBRTC_LEGACY
- {0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, {0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86}}, // agc2 uuid
+ {0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, {0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86}}, // agc2 uuid
#endif
- {0xbb392ec0, 0x8d4d, 0x11e0, 0xa896, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}, // aec uuid
- {0xc06c8400, 0x8e06, 0x11e0, 0x9cb6, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}, // ns uuid
+ {0xbb392ec0, 0x8d4d, 0x11e0, 0xa896, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}, // aec uuid
+ {0xc06c8400, 0x8e06, 0x11e0, 0x9cb6, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}}, // ns uuid
};
constexpr audio_channel_mask_t kPreProcConfigChMask[] = {
- AUDIO_CHANNEL_IN_MONO,
- AUDIO_CHANNEL_IN_STEREO,
- AUDIO_CHANNEL_IN_FRONT_BACK,
- AUDIO_CHANNEL_IN_6,
- AUDIO_CHANNEL_IN_2POINT0POINT2,
- AUDIO_CHANNEL_IN_2POINT1POINT2,
- AUDIO_CHANNEL_IN_3POINT0POINT2,
- AUDIO_CHANNEL_IN_3POINT1POINT2,
- AUDIO_CHANNEL_IN_5POINT1,
- AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO,
- AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO,
- AUDIO_CHANNEL_IN_VOICE_CALL_MONO,
+ AUDIO_CHANNEL_IN_MONO,
+ AUDIO_CHANNEL_IN_STEREO,
+ AUDIO_CHANNEL_IN_FRONT_BACK,
+ AUDIO_CHANNEL_IN_6,
+ AUDIO_CHANNEL_IN_2POINT0POINT2,
+ AUDIO_CHANNEL_IN_2POINT1POINT2,
+ AUDIO_CHANNEL_IN_3POINT0POINT2,
+ AUDIO_CHANNEL_IN_3POINT1POINT2,
+ AUDIO_CHANNEL_IN_5POINT1,
+ AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO,
+ AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO,
+ AUDIO_CHANNEL_IN_VOICE_CALL_MONO,
};
constexpr int kPreProcConfigChMaskCount = std::size(kPreProcConfigChMask);
void printUsage() {
- printf("\nUsage: ");
- printf("\n <executable> [options]\n");
- printf("\nwhere options are, ");
- printf("\n --input <inputfile>");
- printf("\n path to the input file");
- printf("\n --output <outputfile>");
- printf("\n path to the output file");
- printf("\n --help");
- printf("\n Prints this usage information");
- printf("\n --fs <sampling_freq>");
- printf("\n Sampling frequency in Hz, default 16000.");
- printf("\n -ch_mask <channel_mask>\n");
- printf("\n 0 - AUDIO_CHANNEL_IN_MONO");
- printf("\n 1 - AUDIO_CHANNEL_IN_STEREO");
- printf("\n 2 - AUDIO_CHANNEL_IN_FRONT_BACK");
- printf("\n 3 - AUDIO_CHANNEL_IN_6");
- printf("\n 4 - AUDIO_CHANNEL_IN_2POINT0POINT2");
- printf("\n 5 - AUDIO_CHANNEL_IN_2POINT1POINT2");
- printf("\n 6 - AUDIO_CHANNEL_IN_3POINT0POINT2");
- printf("\n 7 - AUDIO_CHANNEL_IN_3POINT1POINT2");
- printf("\n 8 - AUDIO_CHANNEL_IN_5POINT1");
- printf("\n 9 - AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO");
- printf("\n 10 - AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO ");
- printf("\n 11 - AUDIO_CHANNEL_IN_VOICE_CALL_MONO ");
- printf("\n default 0");
- printf("\n --far <farend_file>");
- printf("\n Path to far-end file needed for echo cancellation");
- printf("\n --aec");
- printf("\n Enable Echo Cancellation, default disabled");
- printf("\n --ns");
- printf("\n Enable Noise Suppression, default disabled");
- printf("\n --agc");
- printf("\n Enable Gain Control, default disabled");
+ printf("\nUsage: ");
+ printf("\n <executable> [options]\n");
+ printf("\nwhere options are, ");
+ printf("\n --input <inputfile>");
+ printf("\n path to the input file");
+ printf("\n --output <outputfile>");
+ printf("\n path to the output file");
+ printf("\n --help");
+ printf("\n Prints this usage information");
+ printf("\n --fs <sampling_freq>");
+ printf("\n Sampling frequency in Hz, default 16000.");
+ printf("\n -ch_mask <channel_mask>\n");
+ printf("\n 0 - AUDIO_CHANNEL_IN_MONO");
+ printf("\n 1 - AUDIO_CHANNEL_IN_STEREO");
+ printf("\n 2 - AUDIO_CHANNEL_IN_FRONT_BACK");
+ printf("\n 3 - AUDIO_CHANNEL_IN_6");
+ printf("\n 4 - AUDIO_CHANNEL_IN_2POINT0POINT2");
+ printf("\n 5 - AUDIO_CHANNEL_IN_2POINT1POINT2");
+ printf("\n 6 - AUDIO_CHANNEL_IN_3POINT0POINT2");
+ printf("\n 7 - AUDIO_CHANNEL_IN_3POINT1POINT2");
+ printf("\n 8 - AUDIO_CHANNEL_IN_5POINT1");
+ printf("\n 9 - AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO");
+ printf("\n 10 - AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO ");
+ printf("\n 11 - AUDIO_CHANNEL_IN_VOICE_CALL_MONO ");
+ printf("\n default 0");
+ printf("\n --far <farend_file>");
+ printf("\n Path to far-end file needed for echo cancellation");
+ printf("\n --aec");
+ printf("\n Enable Echo Cancellation, default disabled");
+ printf("\n --ns");
+ printf("\n Enable Noise Suppression, default disabled");
+ printf("\n --agc");
+ printf("\n Enable Gain Control, default disabled");
#ifndef WEBRTC_LEGACY
- printf("\n --agc2");
- printf("\n Enable Gain Controller 2, default disabled");
+ printf("\n --agc2");
+ printf("\n Enable Gain Controller 2, default disabled");
#endif
- printf("\n --ns_lvl <ns_level>");
- printf("\n Noise Suppression level in dB, default value 0dB");
- printf("\n --agc_tgt_lvl <target_level>");
- printf("\n AGC Target Level in dB, default value 3dB");
- printf("\n --agc_comp_lvl <comp_level>");
- printf("\n AGC Comp Level in dB, default value 9dB");
+ printf("\n --ns_lvl <ns_level>");
+ printf("\n Noise Suppression level in dB, default value 0dB");
+ printf("\n --agc_tgt_lvl <target_level>");
+ printf("\n AGC Target Level in dB, default value 3dB");
+ printf("\n --agc_comp_lvl <comp_level>");
+ printf("\n AGC Comp Level in dB, default value 9dB");
#ifndef WEBRTC_LEGACY
- printf("\n --agc2_gain <fixed_digital_gain>");
- printf("\n AGC Fixed Digital Gain in dB, default value 0dB");
- printf("\n --agc2_lvl <level_estimator>");
- printf("\n AGC Adaptive Digital Level Estimator, default value kRms");
- printf("\n --agc2_sat_mgn <saturation_margin>");
- printf("\n AGC Adaptive Digital Saturation Margin in dB, default value 2dB");
+ printf("\n --agc2_gain <fixed_digital_gain>");
+ printf("\n AGC Fixed Digital Gain in dB, default value 0dB");
+ printf("\n --agc2_lvl <level_estimator>");
+ printf("\n AGC Adaptive Digital Level Estimator, default value kRms");
+ printf("\n --agc2_sat_mgn <saturation_margin>");
+ printf("\n AGC Adaptive Digital Saturation Margin in dB, default value 2dB");
#endif
- printf("\n --aec_delay <delay>");
- printf("\n AEC delay value in ms, default value 0ms");
- printf("\n");
+ printf("\n --aec_delay <delay>");
+ printf("\n AEC delay value in ms, default value 0ms");
+ printf("\n");
}
constexpr float kTenMilliSecVal = 0.01;
-int preProcCreateEffect(effect_handle_t *pEffectHandle, uint32_t effectType,
- effect_config_t *pConfig, int sessionId, int ioId) {
- if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.create_effect(&kPreProcUuids[effectType],
- sessionId, ioId, pEffectHandle);
- status != 0) {
- ALOGE("Audio Preprocessing create returned an error = %d\n", status);
- return EXIT_FAILURE;
- }
- int reply = 0;
- uint32_t replySize = sizeof(reply);
- if (effectType == PREPROC_AEC) {
+int preProcCreateEffect(effect_handle_t* pEffectHandle, uint32_t effectType,
+ effect_config_t* pConfig, int sessionId, int ioId) {
+ if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.create_effect(&kPreProcUuids[effectType],
+ sessionId, ioId, pEffectHandle);
+ status != 0) {
+ ALOGE("Audio Preprocessing create returned an error = %d\n", status);
+ return EXIT_FAILURE;
+ }
+ int reply = 0;
+ uint32_t replySize = sizeof(reply);
+ if (effectType == PREPROC_AEC) {
+ (**pEffectHandle)
+ ->command(*pEffectHandle, EFFECT_CMD_SET_CONFIG_REVERSE, sizeof(effect_config_t),
+ pConfig, &replySize, &reply);
+ }
(**pEffectHandle)
- ->command(*pEffectHandle, EFFECT_CMD_SET_CONFIG_REVERSE, sizeof(effect_config_t), pConfig,
- &replySize, &reply);
- }
- (**pEffectHandle)
- ->command(*pEffectHandle, EFFECT_CMD_SET_CONFIG, sizeof(effect_config_t), pConfig,
- &replySize, &reply);
- return reply;
+ ->command(*pEffectHandle, EFFECT_CMD_SET_CONFIG, sizeof(effect_config_t), pConfig,
+ &replySize, &reply);
+ return reply;
}
int preProcSetConfigParam(uint32_t paramType, uint32_t paramValue, effect_handle_t effectHandle) {
- int reply = 0;
- uint32_t replySize = sizeof(reply);
- uint32_t paramData[2] = {paramType, paramValue};
- effect_param_t *effectParam =
- (effect_param_t *)malloc(sizeof(*effectParam) + sizeof(paramData));
- memcpy(&effectParam->data[0], ¶mData[0], sizeof(paramData));
- effectParam->psize = sizeof(paramData[0]);
- (*effectHandle)
- ->command(effectHandle, EFFECT_CMD_SET_PARAM, sizeof(effect_param_t), effectParam,
- &replySize, &reply);
- free(effectParam);
- return reply;
+ int reply = 0;
+ uint32_t replySize = sizeof(reply);
+ uint32_t paramData[2] = {paramType, paramValue};
+ effect_param_t* effectParam = (effect_param_t*)malloc(sizeof(*effectParam) + sizeof(paramData));
+ memcpy(&effectParam->data[0], ¶mData[0], sizeof(paramData));
+ effectParam->psize = sizeof(paramData[0]);
+ (*effectHandle)
+ ->command(effectHandle, EFFECT_CMD_SET_PARAM, sizeof(effect_param_t), effectParam,
+ &replySize, &reply);
+ free(effectParam);
+ return reply;
}
-int main(int argc, const char *argv[]) {
- if (argc == 1) {
- printUsage();
- return EXIT_FAILURE;
- }
- const char *inputFile = nullptr;
- const char *outputFile = nullptr;
- const char *farFile = nullptr;
- int effectEn[PREPROC_NUM_EFFECTS] = {0};
-
- const option long_opts[] = {
- {"help", no_argument, nullptr, ARG_HELP},
- {"input", required_argument, nullptr, ARG_INPUT},
- {"output", required_argument, nullptr, ARG_OUTPUT},
- {"far", required_argument, nullptr, ARG_FAR},
- {"fs", required_argument, nullptr, ARG_FS},
- {"ch_mask", required_argument, nullptr, ARG_CH_MASK},
- {"agc_tgt_lvl", required_argument, nullptr, ARG_AGC_TGT_LVL},
- {"agc_comp_lvl", required_argument, nullptr, ARG_AGC_COMP_LVL},
-#ifndef WEBRTC_LEGACY
- {"agc2_gain", required_argument, nullptr, ARG_AGC2_GAIN},
- {"agc2_lvl", required_argument, nullptr, ARG_AGC2_LVL},
- {"agc2_sat_mgn", required_argument, nullptr, ARG_AGC2_SAT_MGN},
-#endif
- {"aec_delay", required_argument, nullptr, ARG_AEC_DELAY},
- {"ns_lvl", required_argument, nullptr, ARG_NS_LVL},
- {"aec", no_argument, &effectEn[PREPROC_AEC], 1},
- {"agc", no_argument, &effectEn[PREPROC_AGC], 1},
-#ifndef WEBRTC_LEGACY
- {"agc2", no_argument, &effectEn[PREPROC_AGC2], 1},
-#endif
- {"ns", no_argument, &effectEn[PREPROC_NS], 1},
- {nullptr, 0, nullptr, 0},
- };
- struct preProcConfigParams_t preProcCfgParams {};
-
- while (true) {
- const int opt = getopt_long(argc, (char *const *)argv, "i:o:", long_opts, nullptr);
- if (opt == -1) {
- break;
- }
- switch (opt) {
- case ARG_HELP:
+int main(int argc, const char* argv[]) {
+ if (argc == 1) {
printUsage();
- return 0;
- case ARG_INPUT: {
- inputFile = (char *)optarg;
- break;
- }
- case ARG_OUTPUT: {
- outputFile = (char *)optarg;
- break;
- }
- case ARG_FAR: {
- farFile = (char *)optarg;
- break;
- }
- case ARG_FS: {
- preProcCfgParams.samplingFreq = atoi(optarg);
- break;
- }
- case ARG_CH_MASK: {
- int chMaskIdx = atoi(optarg);
- if (chMaskIdx < 0 or chMaskIdx > kPreProcConfigChMaskCount) {
- ALOGE("Channel Mask index not in correct range\n");
- printUsage();
- return EXIT_FAILURE;
- }
- preProcCfgParams.chMask = kPreProcConfigChMask[chMaskIdx];
- break;
- }
- case ARG_AGC_TGT_LVL: {
- preProcCfgParams.agcTargetLevel = atoi(optarg);
- break;
- }
- case ARG_AGC_COMP_LVL: {
- preProcCfgParams.agcCompLevel = atoi(optarg);
- break;
- }
-#ifndef WEBRTC_LEGACY
- case ARG_AGC2_GAIN: {
- preProcCfgParams.agc2Gain = atof(optarg);
- break;
- }
- case ARG_AGC2_LVL: {
- preProcCfgParams.agc2Level = atoi(optarg);
- break;
- }
- case ARG_AGC2_SAT_MGN: {
- preProcCfgParams.agc2SaturationMargin = atof(optarg);
- break;
- }
-#endif
- case ARG_AEC_DELAY: {
- preProcCfgParams.aecDelay = atoi(optarg);
- break;
- }
- case ARG_NS_LVL: {
- preProcCfgParams.nsLevel = atoi(optarg);
- break;
- }
- default:
- break;
- }
- }
-
- if (inputFile == nullptr) {
- ALOGE("Error: missing input file\n");
- printUsage();
- return EXIT_FAILURE;
- }
-
- std::unique_ptr<FILE, decltype(&fclose)> inputFp(fopen(inputFile, "rb"), &fclose);
- if (inputFp == nullptr) {
- ALOGE("Cannot open input file %s\n", inputFile);
- return EXIT_FAILURE;
- }
-
- std::unique_ptr<FILE, decltype(&fclose)> farFp(fopen(farFile, "rb"), &fclose);
- std::unique_ptr<FILE, decltype(&fclose)> outputFp(fopen(outputFile, "wb"), &fclose);
- if (effectEn[PREPROC_AEC]) {
- if (farFile == nullptr) {
- ALOGE("Far end signal file required for echo cancellation \n");
- return EXIT_FAILURE;
- }
- if (farFp == nullptr) {
- ALOGE("Cannot open far end stream file %s\n", farFile);
- return EXIT_FAILURE;
- }
- struct stat statInput, statFar;
- (void)fstat(fileno(inputFp.get()), &statInput);
- (void)fstat(fileno(farFp.get()), &statFar);
- if (statInput.st_size != statFar.st_size) {
- ALOGE("Near and far end signals are of different sizes");
- return EXIT_FAILURE;
- }
- }
- if (outputFile != nullptr && outputFp == nullptr) {
- ALOGE("Cannot open output file %s\n", outputFile);
- return EXIT_FAILURE;
- }
-
- int32_t sessionId = 1;
- int32_t ioId = 1;
- effect_handle_t effectHandle[PREPROC_NUM_EFFECTS] = {nullptr};
- effect_config_t config;
- config.inputCfg.samplingRate = config.outputCfg.samplingRate = preProcCfgParams.samplingFreq;
- config.inputCfg.channels = config.outputCfg.channels = preProcCfgParams.chMask;
- config.inputCfg.format = config.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
-
- // Create all the effect handles
- for (int i = 0; i < PREPROC_NUM_EFFECTS; i++) {
- if (int status = preProcCreateEffect(&effectHandle[i], i, &config, sessionId, ioId);
- status != 0) {
- ALOGE("Create effect call returned error %i", status);
- return EXIT_FAILURE;
- }
- }
-
- for (int i = 0; i < PREPROC_NUM_EFFECTS; i++) {
- if (effectEn[i] == 1) {
- int reply = 0;
- uint32_t replySize = sizeof(reply);
- (*effectHandle[i])
- ->command(effectHandle[i], EFFECT_CMD_ENABLE, 0, nullptr, &replySize, &reply);
- if (reply != 0) {
- ALOGE("Command enable call returned error %d\n", reply);
return EXIT_FAILURE;
- }
}
- }
+ const char* inputFile = nullptr;
+ const char* outputFile = nullptr;
+ const char* farFile = nullptr;
+ int effectEn[PREPROC_NUM_EFFECTS] = {0};
- // Set Config Params of the effects
- if (effectEn[PREPROC_AGC]) {
- if (int status = preProcSetConfigParam(AGC_PARAM_TARGET_LEVEL,
- (uint32_t)preProcCfgParams.agcTargetLevel,
- effectHandle[PREPROC_AGC]);
- status != 0) {
- ALOGE("Invalid AGC Target Level. Error %d\n", status);
- return EXIT_FAILURE;
- }
- if (int status =
- preProcSetConfigParam(AGC_PARAM_COMP_GAIN, (uint32_t)preProcCfgParams.agcCompLevel,
- effectHandle[PREPROC_AGC]);
- status != 0) {
- ALOGE("Invalid AGC Comp Gain. Error %d\n", status);
- return EXIT_FAILURE;
- }
- }
+ const option long_opts[] = {
+ {"help", no_argument, nullptr, ARG_HELP},
+ {"input", required_argument, nullptr, ARG_INPUT},
+ {"output", required_argument, nullptr, ARG_OUTPUT},
+ {"far", required_argument, nullptr, ARG_FAR},
+ {"fs", required_argument, nullptr, ARG_FS},
+ {"ch_mask", required_argument, nullptr, ARG_CH_MASK},
+ {"agc_tgt_lvl", required_argument, nullptr, ARG_AGC_TGT_LVL},
+ {"agc_comp_lvl", required_argument, nullptr, ARG_AGC_COMP_LVL},
#ifndef WEBRTC_LEGACY
- if (effectEn[PREPROC_AGC2]) {
- if (int status = preProcSetConfigParam(AGC2_PARAM_FIXED_DIGITAL_GAIN,
- (float)preProcCfgParams.agc2Gain,
- effectHandle[PREPROC_AGC2]);
- status != 0) {
- ALOGE("Invalid AGC2 Fixed Digital Gain. Error %d\n", status);
- return EXIT_FAILURE;
- }
- if (int status = preProcSetConfigParam(AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR,
- (uint32_t)preProcCfgParams.agc2Level,
- effectHandle[PREPROC_AGC2]);
- status != 0) {
- ALOGE("Invalid AGC2 Level Estimator. Error %d\n", status);
- return EXIT_FAILURE;
- }
- if (int status = preProcSetConfigParam(AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN,
- (float)preProcCfgParams.agc2SaturationMargin,
- effectHandle[PREPROC_AGC2]);
- status != 0) {
- ALOGE("Invalid AGC2 Saturation Margin. Error %d\n", status);
- return EXIT_FAILURE;
- }
- }
+ {"agc2_gain", required_argument, nullptr, ARG_AGC2_GAIN},
+ {"agc2_lvl", required_argument, nullptr, ARG_AGC2_LVL},
+ {"agc2_sat_mgn", required_argument, nullptr, ARG_AGC2_SAT_MGN},
#endif
- if (effectEn[PREPROC_NS]) {
- if (int status = preProcSetConfigParam(NS_PARAM_LEVEL, (uint32_t)preProcCfgParams.nsLevel,
- effectHandle[PREPROC_NS]);
- status != 0) {
- ALOGE("Invalid Noise Suppression level Error %d\n", status);
- return EXIT_FAILURE;
- }
- }
+ {"aec_delay", required_argument, nullptr, ARG_AEC_DELAY},
+ {"ns_lvl", required_argument, nullptr, ARG_NS_LVL},
+ {"aec", no_argument, &effectEn[PREPROC_AEC], 1},
+ {"agc", no_argument, &effectEn[PREPROC_AGC], 1},
+#ifndef WEBRTC_LEGACY
+ {"agc2", no_argument, &effectEn[PREPROC_AGC2], 1},
+#endif
+ {"ns", no_argument, &effectEn[PREPROC_NS], 1},
+ {nullptr, 0, nullptr, 0},
+ };
+ struct preProcConfigParams_t preProcCfgParams {};
- // Process Call
- const int frameLength = (int)(preProcCfgParams.samplingFreq * kTenMilliSecVal);
- const int ioChannelCount = audio_channel_count_from_in_mask(preProcCfgParams.chMask);
- const int ioFrameSize = ioChannelCount * sizeof(short);
- int frameCounter = 0;
- while (true) {
- std::vector<short> in(frameLength * ioChannelCount);
- std::vector<short> out(frameLength * ioChannelCount);
- std::vector<short> farIn(frameLength * ioChannelCount);
- size_t samplesRead = fread(in.data(), ioFrameSize, frameLength, inputFp.get());
- if (samplesRead == 0) {
- break;
+ while (true) {
+ const int opt = getopt_long(argc, (char* const*)argv, "i:o:", long_opts, nullptr);
+ if (opt == -1) {
+ break;
+ }
+ switch (opt) {
+ case ARG_HELP:
+ printUsage();
+ return 0;
+ case ARG_INPUT: {
+ inputFile = (char*)optarg;
+ break;
+ }
+ case ARG_OUTPUT: {
+ outputFile = (char*)optarg;
+ break;
+ }
+ case ARG_FAR: {
+ farFile = (char*)optarg;
+ break;
+ }
+ case ARG_FS: {
+ preProcCfgParams.samplingFreq = atoi(optarg);
+ break;
+ }
+ case ARG_CH_MASK: {
+ int chMaskIdx = atoi(optarg);
+ if (chMaskIdx < 0 or chMaskIdx > kPreProcConfigChMaskCount) {
+ ALOGE("Channel Mask index not in correct range\n");
+ printUsage();
+ return EXIT_FAILURE;
+ }
+ preProcCfgParams.chMask = kPreProcConfigChMask[chMaskIdx];
+ break;
+ }
+ case ARG_AGC_TGT_LVL: {
+ preProcCfgParams.agcTargetLevel = atoi(optarg);
+ break;
+ }
+ case ARG_AGC_COMP_LVL: {
+ preProcCfgParams.agcCompLevel = atoi(optarg);
+ break;
+ }
+#ifndef WEBRTC_LEGACY
+ case ARG_AGC2_GAIN: {
+ preProcCfgParams.agc2Gain = atof(optarg);
+ break;
+ }
+ case ARG_AGC2_LVL: {
+ preProcCfgParams.agc2Level = atoi(optarg);
+ break;
+ }
+ case ARG_AGC2_SAT_MGN: {
+ preProcCfgParams.agc2SaturationMargin = atof(optarg);
+ break;
+ }
+#endif
+ case ARG_AEC_DELAY: {
+ preProcCfgParams.aecDelay = atoi(optarg);
+ break;
+ }
+ case ARG_NS_LVL: {
+ preProcCfgParams.nsLevel = atoi(optarg);
+ break;
+ }
+ default:
+ break;
+ }
}
- audio_buffer_t inputBuffer, outputBuffer;
- audio_buffer_t farInBuffer{};
- inputBuffer.frameCount = samplesRead;
- outputBuffer.frameCount = samplesRead;
- inputBuffer.s16 = in.data();
- outputBuffer.s16 = out.data();
- if (farFp != nullptr) {
- samplesRead = fread(farIn.data(), ioFrameSize, frameLength, farFp.get());
- if (samplesRead == 0) {
- break;
- }
- farInBuffer.frameCount = samplesRead;
- farInBuffer.s16 = farIn.data();
+ if (inputFile == nullptr) {
+ ALOGE("Error: missing input file\n");
+ printUsage();
+ return EXIT_FAILURE;
+ }
+
+ std::unique_ptr<FILE, decltype(&fclose)> inputFp(fopen(inputFile, "rb"), &fclose);
+ if (inputFp == nullptr) {
+ ALOGE("Cannot open input file %s\n", inputFile);
+ return EXIT_FAILURE;
+ }
+
+ std::unique_ptr<FILE, decltype(&fclose)> farFp(fopen(farFile, "rb"), &fclose);
+ std::unique_ptr<FILE, decltype(&fclose)> outputFp(fopen(outputFile, "wb"), &fclose);
+ if (effectEn[PREPROC_AEC]) {
+ if (farFile == nullptr) {
+ ALOGE("Far end signal file required for echo cancellation \n");
+ return EXIT_FAILURE;
+ }
+ if (farFp == nullptr) {
+ ALOGE("Cannot open far end stream file %s\n", farFile);
+ return EXIT_FAILURE;
+ }
+ struct stat statInput, statFar;
+ (void)fstat(fileno(inputFp.get()), &statInput);
+ (void)fstat(fileno(farFp.get()), &statFar);
+ if (statInput.st_size != statFar.st_size) {
+ ALOGE("Near and far end signals are of different sizes");
+ return EXIT_FAILURE;
+ }
+ }
+ if (outputFile != nullptr && outputFp == nullptr) {
+ ALOGE("Cannot open output file %s\n", outputFile);
+ return EXIT_FAILURE;
+ }
+
+ int32_t sessionId = 1;
+ int32_t ioId = 1;
+ effect_handle_t effectHandle[PREPROC_NUM_EFFECTS] = {nullptr};
+ effect_config_t config;
+ config.inputCfg.samplingRate = config.outputCfg.samplingRate = preProcCfgParams.samplingFreq;
+ config.inputCfg.channels = config.outputCfg.channels = preProcCfgParams.chMask;
+ config.inputCfg.format = config.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+
+ // Create all the effect handles
+ for (int i = 0; i < PREPROC_NUM_EFFECTS; i++) {
+ if (int status = preProcCreateEffect(&effectHandle[i], i, &config, sessionId, ioId);
+ status != 0) {
+ ALOGE("Create effect call returned error %i", status);
+ return EXIT_FAILURE;
+ }
}
for (int i = 0; i < PREPROC_NUM_EFFECTS; i++) {
- if (effectEn[i] == 1) {
- if (i == PREPROC_AEC) {
- if (int status =
- preProcSetConfigParam(AEC_PARAM_ECHO_DELAY, (uint32_t)preProcCfgParams.aecDelay,
- effectHandle[PREPROC_AEC]);
- status != 0) {
- ALOGE("preProcSetConfigParam returned Error %d\n", status);
- return EXIT_FAILURE;
- }
+ if (effectEn[i] == 1) {
+ int reply = 0;
+ uint32_t replySize = sizeof(reply);
+ (*effectHandle[i])
+ ->command(effectHandle[i], EFFECT_CMD_ENABLE, 0, nullptr, &replySize, &reply);
+ if (reply != 0) {
+ ALOGE("Command enable call returned error %d\n", reply);
+ return EXIT_FAILURE;
+ }
}
- if (int status =
- (*effectHandle[i])->process(effectHandle[i], &inputBuffer, &outputBuffer);
+ }
+
+ // Set Config Params of the effects
+ if (effectEn[PREPROC_AGC]) {
+ if (int status = preProcSetConfigParam(AGC_PARAM_TARGET_LEVEL,
+ (uint32_t)preProcCfgParams.agcTargetLevel,
+ effectHandle[PREPROC_AGC]);
status != 0) {
- ALOGE("\nError: Process i = %d returned with error %d\n", i, status);
- return EXIT_FAILURE;
- }
- if (i == PREPROC_AEC) {
- if (int status = (*effectHandle[i])
- ->process_reverse(effectHandle[i], &farInBuffer, &outputBuffer);
- status != 0) {
- ALOGE("\nError: Process reverse i = %d returned with error %d\n", i, status);
+ ALOGE("Invalid AGC Target Level. Error %d\n", status);
return EXIT_FAILURE;
- }
}
- }
+ if (int status = preProcSetConfigParam(AGC_PARAM_COMP_GAIN,
+ (uint32_t)preProcCfgParams.agcCompLevel,
+ effectHandle[PREPROC_AGC]);
+ status != 0) {
+ ALOGE("Invalid AGC Comp Gain. Error %d\n", status);
+ return EXIT_FAILURE;
+ }
}
- if (outputFp != nullptr) {
- size_t samplesWritten =
- fwrite(out.data(), ioFrameSize, outputBuffer.frameCount, outputFp.get());
- if (samplesWritten != outputBuffer.frameCount) {
- ALOGE("\nError: Output file writing failed");
- break;
- }
+#ifndef WEBRTC_LEGACY
+ if (effectEn[PREPROC_AGC2]) {
+ if (int status = preProcSetConfigParam(AGC2_PARAM_FIXED_DIGITAL_GAIN,
+ (float)preProcCfgParams.agc2Gain,
+ effectHandle[PREPROC_AGC2]);
+ status != 0) {
+ ALOGE("Invalid AGC2 Fixed Digital Gain. Error %d\n", status);
+ return EXIT_FAILURE;
+ }
+ if (int status = preProcSetConfigParam(AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR,
+ (uint32_t)preProcCfgParams.agc2Level,
+ effectHandle[PREPROC_AGC2]);
+ status != 0) {
+ ALOGE("Invalid AGC2 Level Estimator. Error %d\n", status);
+ return EXIT_FAILURE;
+ }
+ if (int status = preProcSetConfigParam(AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN,
+ (float)preProcCfgParams.agc2SaturationMargin,
+ effectHandle[PREPROC_AGC2]);
+ status != 0) {
+ ALOGE("Invalid AGC2 Saturation Margin. Error %d\n", status);
+ return EXIT_FAILURE;
+ }
}
- frameCounter += frameLength;
- }
- // Release all the effect handles created
- for (int i = 0; i < PREPROC_NUM_EFFECTS; i++) {
- if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(effectHandle[i]);
- status != 0) {
- ALOGE("Audio Preprocessing release returned an error = %d\n", status);
- return EXIT_FAILURE;
+#endif
+ if (effectEn[PREPROC_NS]) {
+ if (int status = preProcSetConfigParam(NS_PARAM_LEVEL, (uint32_t)preProcCfgParams.nsLevel,
+ effectHandle[PREPROC_NS]);
+ status != 0) {
+ ALOGE("Invalid Noise Suppression level Error %d\n", status);
+ return EXIT_FAILURE;
+ }
}
- }
- return EXIT_SUCCESS;
+
+ // Process Call
+ const int frameLength = (int)(preProcCfgParams.samplingFreq * kTenMilliSecVal);
+ const int ioChannelCount = audio_channel_count_from_in_mask(preProcCfgParams.chMask);
+ const int ioFrameSize = ioChannelCount * sizeof(short);
+ int frameCounter = 0;
+ while (true) {
+ std::vector<short> in(frameLength * ioChannelCount);
+ std::vector<short> out(frameLength * ioChannelCount);
+ std::vector<short> farIn(frameLength * ioChannelCount);
+ size_t samplesRead = fread(in.data(), ioFrameSize, frameLength, inputFp.get());
+ if (samplesRead == 0) {
+ break;
+ }
+ audio_buffer_t inputBuffer, outputBuffer;
+ audio_buffer_t farInBuffer{};
+ inputBuffer.frameCount = samplesRead;
+ outputBuffer.frameCount = samplesRead;
+ inputBuffer.s16 = in.data();
+ outputBuffer.s16 = out.data();
+
+ if (farFp != nullptr) {
+ samplesRead = fread(farIn.data(), ioFrameSize, frameLength, farFp.get());
+ if (samplesRead == 0) {
+ break;
+ }
+ farInBuffer.frameCount = samplesRead;
+ farInBuffer.s16 = farIn.data();
+ }
+
+ for (int i = 0; i < PREPROC_NUM_EFFECTS; i++) {
+ if (effectEn[i] == 1) {
+ if (i == PREPROC_AEC) {
+ if (int status = preProcSetConfigParam(AEC_PARAM_ECHO_DELAY,
+ (uint32_t)preProcCfgParams.aecDelay,
+ effectHandle[PREPROC_AEC]);
+ status != 0) {
+ ALOGE("preProcSetConfigParam returned Error %d\n", status);
+ return EXIT_FAILURE;
+ }
+ }
+ if (int status = (*effectHandle[i])
+ ->process(effectHandle[i], &inputBuffer, &outputBuffer);
+ status != 0) {
+ ALOGE("\nError: Process i = %d returned with error %d\n", i, status);
+ return EXIT_FAILURE;
+ }
+ if (i == PREPROC_AEC) {
+ if (int status = (*effectHandle[i])
+ ->process_reverse(effectHandle[i], &farInBuffer,
+ &outputBuffer);
+ status != 0) {
+ ALOGE("\nError: Process reverse i = %d returned with error %d\n", i,
+ status);
+ return EXIT_FAILURE;
+ }
+ }
+ }
+ }
+ if (outputFp != nullptr) {
+ size_t samplesWritten =
+ fwrite(out.data(), ioFrameSize, outputBuffer.frameCount, outputFp.get());
+ if (samplesWritten != outputBuffer.frameCount) {
+ ALOGE("\nError: Output file writing failed");
+ break;
+ }
+ }
+ frameCounter += frameLength;
+ }
+ // Release all the effect handles created
+ for (int i = 0; i < PREPROC_NUM_EFFECTS; i++) {
+ if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(effectHandle[i]);
+ status != 0) {
+ ALOGE("Audio Preprocessing release returned an error = %d\n", status);
+ return EXIT_FAILURE;
+ }
+ }
+ return EXIT_SUCCESS;
}
diff --git a/media/libmediatranscoding/TEST_MAPPING b/media/libmediatranscoding/TEST_MAPPING
new file mode 100644
index 0000000..f8a9db9
--- /dev/null
+++ b/media/libmediatranscoding/TEST_MAPPING
@@ -0,0 +1,32 @@
+{
+ "presubmit": [
+ {
+ "name": "MediaSampleQueueTests"
+ },
+ {
+ "name": "MediaSampleReaderNDKTests"
+ },
+ {
+ "name": "MediaSampleWriterTests"
+ },
+ {
+ "name": "MediaTrackTranscoderTests"
+ },
+ {
+ "name": "MediaTranscoderTests"
+ },
+ {
+ "name": "PassthroughTrackTranscoderTests"
+ },
+ {
+ "name": "TranscodingClientManager_tests"
+ },
+ {
+ "name": "TranscodingSessionController_tests"
+ },
+ {
+ "name": "VideoTrackTranscoderTests"
+ }
+ ]
+}
+
diff --git a/media/libmediatranscoding/TranscoderWrapper.cpp b/media/libmediatranscoding/TranscoderWrapper.cpp
index 8cc8dd2..da86187 100644
--- a/media/libmediatranscoding/TranscoderWrapper.cpp
+++ b/media/libmediatranscoding/TranscoderWrapper.cpp
@@ -192,7 +192,7 @@
new ndk::ScopedAParcel());
}
- callback->onResourceLost();
+ callback->onResourceLost(clientId, sessionId);
} else {
callback->onError(clientId, sessionId, toTranscodingError(err));
}
diff --git a/media/libmediatranscoding/TranscodingSessionController.cpp b/media/libmediatranscoding/TranscodingSessionController.cpp
index bab25c6..49a7083 100644
--- a/media/libmediatranscoding/TranscodingSessionController.cpp
+++ b/media/libmediatranscoding/TranscodingSessionController.cpp
@@ -485,30 +485,34 @@
});
}
-void TranscodingSessionController::onResourceLost() {
+void TranscodingSessionController::onResourceLost(ClientIdType clientId, SessionIdType sessionId) {
ALOGI("%s", __FUNCTION__);
- std::scoped_lock lock{mLock};
-
- if (mResourceLost) {
- return;
- }
-
- // If we receive a resource loss event, the TranscoderLibrary already paused
- // the transcoding, so we don't need to call onPaused to notify it to pause.
- // Only need to update the session state here.
- if (mCurrentSession != nullptr && mCurrentSession->state == Session::RUNNING) {
- mCurrentSession->state = Session::PAUSED;
- // Notify the client as a paused event.
- auto clientCallback = mCurrentSession->callback.lock();
- if (clientCallback != nullptr) {
- clientCallback->onTranscodingPaused(mCurrentSession->key.second);
+ notifyClient(clientId, sessionId, "resource_lost", [=](const SessionKeyType& sessionKey) {
+ if (mResourceLost) {
+ return;
}
- mResourcePolicy->setPidResourceLost(mCurrentSession->request.clientPid);
- }
- mResourceLost = true;
- validateState_l();
+ Session* resourceLostSession = &mSessionMap[sessionKey];
+ if (resourceLostSession->state != Session::RUNNING) {
+ ALOGW("session %s lost resource but is no longer running",
+ sessionToString(sessionKey).c_str());
+ return;
+ }
+ // If we receive a resource loss event, the transcoder already paused the transcoding,
+ // so we don't need to call onPaused() to pause it. However, we still need to notify
+ // the client and update the session state here.
+ resourceLostSession->state = Session::PAUSED;
+ // Notify the client as a paused event.
+ auto clientCallback = resourceLostSession->callback.lock();
+ if (clientCallback != nullptr) {
+ clientCallback->onTranscodingPaused(sessionKey.second);
+ }
+ mResourcePolicy->setPidResourceLost(resourceLostSession->request.clientPid);
+ mResourceLost = true;
+
+ validateState_l();
+ });
}
void TranscodingSessionController::onTopUidsChanged(const std::unordered_set<uid_t>& uids) {
diff --git a/media/libmediatranscoding/include/media/TranscoderInterface.h b/media/libmediatranscoding/include/media/TranscoderInterface.h
index e17cd5a..6268aa5 100644
--- a/media/libmediatranscoding/include/media/TranscoderInterface.h
+++ b/media/libmediatranscoding/include/media/TranscoderInterface.h
@@ -64,7 +64,7 @@
// If there is any session currently running, it will be paused. When resource contention
// is solved, the controller should call TranscoderInterface's to either start a new session,
// or resume a paused session.
- virtual void onResourceLost() = 0;
+ virtual void onResourceLost(ClientIdType clientId, SessionIdType sessionId) = 0;
protected:
virtual ~TranscoderCallbackInterface() = default;
diff --git a/media/libmediatranscoding/include/media/TranscodingSessionController.h b/media/libmediatranscoding/include/media/TranscodingSessionController.h
index c082074..4215e06 100644
--- a/media/libmediatranscoding/include/media/TranscodingSessionController.h
+++ b/media/libmediatranscoding/include/media/TranscodingSessionController.h
@@ -58,7 +58,7 @@
void onError(ClientIdType clientId, SessionIdType sessionId, TranscodingErrorCode err) override;
void onProgressUpdate(ClientIdType clientId, SessionIdType sessionId,
int32_t progress) override;
- void onResourceLost() override;
+ void onResourceLost(ClientIdType clientId, SessionIdType sessionId) override;
// ~TranscoderCallbackInterface
// UidPolicyCallbackInterface
diff --git a/media/libmediatranscoding/tests/Android.bp b/media/libmediatranscoding/tests/Android.bp
index 7b15b1b..e49df35 100644
--- a/media/libmediatranscoding/tests/Android.bp
+++ b/media/libmediatranscoding/tests/Android.bp
@@ -1,4 +1,10 @@
// Build the unit tests for libmediatranscoding.
+filegroup {
+ name: "test_assets",
+ path: "assets",
+ srcs: ["assets/**/*"],
+}
+
cc_defaults {
name: "libmediatranscoding_test_defaults",
diff --git a/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp b/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
index 4809d7a..fa52f63 100644
--- a/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
+++ b/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
@@ -44,11 +44,14 @@
constexpr ClientIdType kClientId = 1000;
constexpr SessionIdType kClientSessionId = 0;
constexpr uid_t kClientUid = 5000;
+constexpr pid_t kClientPid = 10000;
constexpr uid_t kInvalidUid = (uid_t)-1;
+constexpr pid_t kInvalidPid = (pid_t)-1;
#define CLIENT(n) (kClientId + (n))
#define SESSION(n) (kClientSessionId + (n))
#define UID(n) (kClientUid + (n))
+#define PID(n) (kClientPid + (n))
class TestUidPolicy : public UidPolicyInterface {
public:
@@ -79,6 +82,31 @@
std::weak_ptr<UidPolicyCallbackInterface> mUidPolicyCallback;
};
+class TestResourcePolicy : public ResourcePolicyInterface {
+public:
+ TestResourcePolicy() { reset(); }
+ virtual ~TestResourcePolicy() = default;
+
+ // ResourcePolicyInterface
+ void setCallback(const std::shared_ptr<ResourcePolicyCallbackInterface>& /*cb*/) override {}
+ void setPidResourceLost(pid_t pid) override {
+ mResourceLostPid = pid;
+ }
+ // ~ResourcePolicyInterface
+
+ pid_t getPid() {
+ pid_t result = mResourceLostPid;
+ reset();
+ return result;
+ }
+
+private:
+ void reset() {
+ mResourceLostPid = kInvalidPid;
+ }
+ pid_t mResourceLostPid;
+};
+
class TestTranscoder : public TranscoderInterface {
public:
TestTranscoder() : mLastError(TranscodingErrorCode::kUnknown) {}
@@ -216,8 +244,9 @@
ALOGI("TranscodingSessionControllerTest set up");
mTranscoder.reset(new TestTranscoder());
mUidPolicy.reset(new TestUidPolicy());
- mController.reset(new TranscodingSessionController(mTranscoder, mUidPolicy,
- nullptr /*resourcePolicy*/));
+ mResourcePolicy.reset(new TestResourcePolicy());
+ mController.reset(
+ new TranscodingSessionController(mTranscoder, mUidPolicy, mResourcePolicy));
mUidPolicy->setCallback(mController);
// Set priority only, ignore other fields for now.
@@ -239,6 +268,7 @@
std::shared_ptr<TestTranscoder> mTranscoder;
std::shared_ptr<TestUidPolicy> mUidPolicy;
+ std::shared_ptr<TestResourcePolicy> mResourcePolicy;
std::shared_ptr<TranscodingSessionController> mController;
TranscodingRequestParcel mOfflineRequest;
TranscodingRequestParcel mRealtimeRequest;
@@ -552,10 +582,12 @@
// Start with unspecified top UID.
// Submit real-time session to CLIENT(0), session should start immediately.
+ mRealtimeRequest.clientPid = PID(0);
mController->submit(CLIENT(0), SESSION(0), UID(0), mRealtimeRequest, mClientCallback0);
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
// Submit offline session to CLIENT(0), should not start.
+ mOfflineRequest.clientPid = PID(0);
mController->submit(CLIENT(1), SESSION(0), UID(0), mOfflineRequest, mClientCallback1);
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
@@ -565,13 +597,22 @@
// Submit real-time session to CLIENT(2) in different uid UID(1).
// Should pause previous session and start new session.
+ mRealtimeRequest.clientPid = PID(1);
mController->submit(CLIENT(2), SESSION(0), UID(1), mRealtimeRequest, mClientCallback2);
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(2), SESSION(0)));
+ // Test 0: No call into ResourcePolicy if resource lost is from a non-running
+ // or non-existent session.
+ mController->onResourceLost(CLIENT(0), SESSION(0));
+ EXPECT_EQ(mResourcePolicy->getPid(), kInvalidPid);
+ mController->onResourceLost(CLIENT(3), SESSION(0));
+ EXPECT_EQ(mResourcePolicy->getPid(), kInvalidPid);
+
// Test 1: No queue change during resource loss.
// Signal resource lost.
- mController->onResourceLost();
+ mController->onResourceLost(CLIENT(2), SESSION(0));
+ EXPECT_EQ(mResourcePolicy->getPid(), PID(1));
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
// Signal resource available, CLIENT(2) should resume.
@@ -580,7 +621,8 @@
// Test 2: Change of queue order during resource loss.
// Signal resource lost.
- mController->onResourceLost();
+ mController->onResourceLost(CLIENT(2), SESSION(0));
+ EXPECT_EQ(mResourcePolicy->getPid(), PID(1));
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
// Move UID(0) back to top, should have no resume due to no resource.
@@ -593,13 +635,15 @@
// Test 3: Adding new queue during resource loss.
// Signal resource lost.
- mController->onResourceLost();
+ mController->onResourceLost(CLIENT(0), SESSION(0));
+ EXPECT_EQ(mResourcePolicy->getPid(), PID(0));
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
// Move UID(2) to top.
mUidPolicy->setTop(UID(2));
// Submit real-time session to CLIENT(3) in UID(2), session shouldn't start due to no resource.
+ mRealtimeRequest.clientPid = PID(2);
mController->submit(CLIENT(3), SESSION(0), UID(2), mRealtimeRequest, mClientCallback3);
EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
diff --git a/media/libmediatranscoding/tests/assets/backyard_hevc_1920x1080_20Mbps.mp4 b/media/libmediatranscoding/tests/assets/TranscodingTestAssets/backyard_hevc_1920x1080_20Mbps.mp4
similarity index 100%
rename from media/libmediatranscoding/tests/assets/backyard_hevc_1920x1080_20Mbps.mp4
rename to media/libmediatranscoding/tests/assets/TranscodingTestAssets/backyard_hevc_1920x1080_20Mbps.mp4
Binary files differ
diff --git a/media/libmediatranscoding/tests/assets/cubicle_avc_480x240_aac_24KHz.mp4 b/media/libmediatranscoding/tests/assets/TranscodingTestAssets/cubicle_avc_480x240_aac_24KHz.mp4
similarity index 100%
rename from media/libmediatranscoding/tests/assets/cubicle_avc_480x240_aac_24KHz.mp4
rename to media/libmediatranscoding/tests/assets/TranscodingTestAssets/cubicle_avc_480x240_aac_24KHz.mp4
Binary files differ
diff --git a/media/libmediatranscoding/tests/assets/desk_hevc_1920x1080_aac_48KHz_rot90.mp4 b/media/libmediatranscoding/tests/assets/TranscodingTestAssets/desk_hevc_1920x1080_aac_48KHz_rot90.mp4
similarity index 100%
rename from media/libmediatranscoding/tests/assets/desk_hevc_1920x1080_aac_48KHz_rot90.mp4
rename to media/libmediatranscoding/tests/assets/TranscodingTestAssets/desk_hevc_1920x1080_aac_48KHz_rot90.mp4
Binary files differ
diff --git a/media/libmediatranscoding/tests/assets/jets_hevc_1280x720_20Mbps.mp4 b/media/libmediatranscoding/tests/assets/TranscodingTestAssets/jets_hevc_1280x720_20Mbps.mp4
similarity index 100%
rename from media/libmediatranscoding/tests/assets/jets_hevc_1280x720_20Mbps.mp4
rename to media/libmediatranscoding/tests/assets/TranscodingTestAssets/jets_hevc_1280x720_20Mbps.mp4
Binary files differ
diff --git a/media/libmediatranscoding/tests/assets/longtest_15s.mp4 b/media/libmediatranscoding/tests/assets/TranscodingTestAssets/longtest_15s.mp4
similarity index 100%
rename from media/libmediatranscoding/tests/assets/longtest_15s.mp4
rename to media/libmediatranscoding/tests/assets/TranscodingTestAssets/longtest_15s.mp4
Binary files differ
diff --git a/media/libmediatranscoding/tests/assets/plex_hevc_3840x2160_12Mbps.mp4 b/media/libmediatranscoding/tests/assets/TranscodingTestAssets/plex_hevc_3840x2160_12Mbps.mp4
similarity index 100%
rename from media/libmediatranscoding/tests/assets/plex_hevc_3840x2160_12Mbps.mp4
rename to media/libmediatranscoding/tests/assets/TranscodingTestAssets/plex_hevc_3840x2160_12Mbps.mp4
Binary files differ
diff --git a/media/libmediatranscoding/tests/assets/plex_hevc_3840x2160_20Mbps.mp4 b/media/libmediatranscoding/tests/assets/TranscodingTestAssets/plex_hevc_3840x2160_20Mbps.mp4
similarity index 100%
rename from media/libmediatranscoding/tests/assets/plex_hevc_3840x2160_20Mbps.mp4
rename to media/libmediatranscoding/tests/assets/TranscodingTestAssets/plex_hevc_3840x2160_20Mbps.mp4
Binary files differ
diff --git a/media/libmediatranscoding/tests/assets/push_assets.sh b/media/libmediatranscoding/tests/push_assets.sh
similarity index 93%
rename from media/libmediatranscoding/tests/assets/push_assets.sh
rename to media/libmediatranscoding/tests/push_assets.sh
index 8afc947..cc71514 100755
--- a/media/libmediatranscoding/tests/assets/push_assets.sh
+++ b/media/libmediatranscoding/tests/push_assets.sh
@@ -23,7 +23,7 @@
adb shell mkdir -p /data/local/tmp/TranscodingTestAssets
-FILES=$ANDROID_BUILD_TOP/frameworks/av/media/libmediatranscoding/tests/assets/*
+FILES=$ANDROID_BUILD_TOP/frameworks/av/media/libmediatranscoding/tests/assets/TranscodingTestAssets/*
for file in $FILES
do
adb push --sync $file /data/local/tmp/TranscodingTestAssets
diff --git a/media/libmediatranscoding/transcoder/Android.bp b/media/libmediatranscoding/transcoder/Android.bp
index 1896412..aa7cdde 100644
--- a/media/libmediatranscoding/transcoder/Android.bp
+++ b/media/libmediatranscoding/transcoder/Android.bp
@@ -60,16 +60,8 @@
},
}
-cc_library_shared {
+cc_library {
name: "libmediatranscoder",
defaults: ["mediatranscoder_defaults"],
}
-cc_library_shared {
- name: "libmediatranscoder_asan",
- defaults: ["mediatranscoder_defaults"],
-
- sanitize: {
- address: true,
- },
-}
diff --git a/media/libmediatranscoding/transcoder/MediaSampleReaderNDK.cpp b/media/libmediatranscoding/transcoder/MediaSampleReaderNDK.cpp
index 53d567e..92ba818 100644
--- a/media/libmediatranscoding/transcoder/MediaSampleReaderNDK.cpp
+++ b/media/libmediatranscoding/transcoder/MediaSampleReaderNDK.cpp
@@ -99,6 +99,7 @@
}
if (!AMediaExtractor_advance(mExtractor)) {
+ LOG(DEBUG) << " EOS in advanceExtractor_l";
mEosReached = true;
for (auto it = mTrackSignals.begin(); it != mTrackSignals.end(); ++it) {
it->second.notify_all();
@@ -137,6 +138,8 @@
LOG(ERROR) << "Unable to seek to " << seekToTimeUs << ", target " << targetTimeUs;
return status;
}
+
+ mEosReached = false;
mExtractorTrackIndex = AMediaExtractor_getSampleTrackIndex(mExtractor);
int64_t sampleTimeUs = AMediaExtractor_getSampleTime(mExtractor);
@@ -181,6 +184,11 @@
if (mEosReached) {
return AMEDIA_ERROR_END_OF_STREAM;
}
+
+ if (!mEnforceSequentialAccess) {
+ return moveToTrack_l(trackIndex);
+ }
+
return AMEDIA_OK;
}
@@ -228,6 +236,8 @@
}
media_status_t MediaSampleReaderNDK::setEnforceSequentialAccess(bool enforce) {
+ LOG(DEBUG) << "setEnforceSequentialAccess( " << enforce << " )";
+
std::scoped_lock lock(mExtractorMutex);
if (mEnforceSequentialAccess && !enforce) {
@@ -369,7 +379,11 @@
info->presentationTimeUs = 0;
info->flags = SAMPLE_FLAG_END_OF_STREAM;
info->size = 0;
+ LOG(DEBUG) << " getSampleInfoForTrack #" << trackIndex << ": End Of Stream";
+ } else {
+ LOG(ERROR) << " getSampleInfoForTrack #" << trackIndex << ": Error " << status;
}
+
return status;
}
diff --git a/media/libmediatranscoding/transcoder/MediaSampleWriter.cpp b/media/libmediatranscoding/transcoder/MediaSampleWriter.cpp
index afa5021..389b941 100644
--- a/media/libmediatranscoding/transcoder/MediaSampleWriter.cpp
+++ b/media/libmediatranscoding/transcoder/MediaSampleWriter.cpp
@@ -79,7 +79,7 @@
MediaSampleWriter::~MediaSampleWriter() {
if (mState == STARTED) {
- stop(); // Join thread.
+ stop();
}
}
@@ -169,38 +169,41 @@
}
mState = STARTED;
- mThread = std::thread([this] {
- media_status_t status = writeSamples();
+ std::thread([this] {
+ bool wasStopped = false;
+ media_status_t status = writeSamples(&wasStopped);
if (auto callbacks = mCallbacks.lock()) {
- callbacks->onFinished(this, status);
+ if (wasStopped && status == AMEDIA_OK) {
+ callbacks->onStopped(this);
+ } else {
+ callbacks->onFinished(this, status);
+ }
}
- });
+ }).detach();
return true;
}
-bool MediaSampleWriter::stop() {
+void MediaSampleWriter::stop() {
{
std::scoped_lock lock(mMutex);
if (mState != STARTED) {
LOG(ERROR) << "Sample writer is not started.";
- return false;
+ return;
}
mState = STOPPED;
}
mSampleSignal.notify_all();
- mThread.join();
- return true;
}
-media_status_t MediaSampleWriter::writeSamples() {
+media_status_t MediaSampleWriter::writeSamples(bool* wasStopped) {
media_status_t muxerStatus = mMuxer->start();
if (muxerStatus != AMEDIA_OK) {
LOG(ERROR) << "Error starting muxer: " << muxerStatus;
return muxerStatus;
}
- media_status_t writeStatus = runWriterLoop();
+ media_status_t writeStatus = runWriterLoop(wasStopped);
if (writeStatus != AMEDIA_OK) {
LOG(ERROR) << "Error writing samples: " << writeStatus;
}
@@ -213,7 +216,7 @@
return writeStatus != AMEDIA_OK ? writeStatus : muxerStatus;
}
-media_status_t MediaSampleWriter::runWriterLoop() NO_THREAD_SAFETY_ANALYSIS {
+media_status_t MediaSampleWriter::runWriterLoop(bool* wasStopped) NO_THREAD_SAFETY_ANALYSIS {
AMediaCodecBufferInfo bufferInfo;
int32_t lastProgressUpdate = 0;
int trackEosCount = 0;
@@ -242,8 +245,9 @@
mSampleSignal.wait(lock);
}
- if (mState != STARTED) {
- return AMEDIA_ERROR_UNKNOWN; // TODO(lnilsson): Custom error code.
+ if (mState == STOPPED) {
+ *wasStopped = true;
+ return AMEDIA_OK;
}
auto& topEntry = mSampleQueue.top();
diff --git a/media/libmediatranscoding/transcoder/MediaTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/MediaTrackTranscoder.cpp
index 698594f..15f7427 100644
--- a/media/libmediatranscoding/transcoder/MediaTrackTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/MediaTrackTranscoder.cpp
@@ -69,41 +69,44 @@
LOG(ERROR) << "TrackTranscoder must be configured before started";
return false;
}
+ mState = STARTED;
- mTranscodingThread = std::thread([this] {
- media_status_t status = runTranscodeLoop();
+ std::thread([this] {
+ bool stopped = false;
+ media_status_t status = runTranscodeLoop(&stopped);
+
+ // Output an EOS sample if the transcoder was stopped.
+ if (stopped) {
+ auto sample = std::make_shared<MediaSample>();
+ sample->info.flags = SAMPLE_FLAG_END_OF_STREAM;
+ onOutputSampleAvailable(sample);
+ }
// Notify the client.
if (auto callbacks = mTranscoderCallback.lock()) {
- if (status != AMEDIA_OK) {
- callbacks->onTrackError(this, status);
- } else {
+ if (stopped) {
+ callbacks->onTrackStopped(this);
+ } else if (status == AMEDIA_OK) {
callbacks->onTrackFinished(this);
+ } else {
+ callbacks->onTrackError(this, status);
}
}
- });
+ }).detach();
- mState = STARTED;
return true;
}
-bool MediaTrackTranscoder::stop() {
+void MediaTrackTranscoder::stop(bool stopOnSyncSample) {
std::scoped_lock lock{mStateMutex};
- if (mState == STARTED) {
+ if (mState == STARTED || (mStopRequest == STOP_ON_SYNC && !stopOnSyncSample)) {
+ mStopRequest = stopOnSyncSample ? STOP_ON_SYNC : STOP_NOW;
abortTranscodeLoop();
- mMediaSampleReader->setEnforceSequentialAccess(false);
- mTranscodingThread.join();
- {
- std::scoped_lock lock{mSampleMutex};
- mSampleQueue.abort(); // Release any buffered samples.
- }
mState = STOPPED;
- return true;
+ } else {
+ LOG(WARNING) << "TrackTranscoder must be started before stopped";
}
-
- LOG(ERROR) << "TrackTranscoder must be started before stopped";
- return false;
}
void MediaTrackTranscoder::notifyTrackFormatAvailable() {
diff --git a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
index 07df5e0..94a9a33 100644
--- a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
@@ -69,38 +69,67 @@
return format;
}
-void MediaTranscoder::sendCallback(media_status_t status) {
- // If the transcoder is already cancelled explicitly, don't send any error callbacks.
- // Tracks and sample writer will report errors for abort. However, currently we can't
- // tell it apart from real errors. Ideally we still want to report real errors back
- // to client, as there is a small chance that explicit abort and the real error come
- // at around the same time, we should report that if abort has a specific error code.
- // On the other hand, if the transcoder actually finished (status is AMEDIA_OK) at around
- // the same time of the abort, we should still report the finish back to the client.
- if (mCancelled && status != AMEDIA_OK) {
+void MediaTranscoder::onThreadFinished(const void* thread, media_status_t threadStatus,
+ bool threadStopped) {
+ LOG(DEBUG) << "Thread " << thread << " finished with status " << threadStatus << " stopped "
+ << threadStopped;
+
+ // Stop all threads if one reports an error.
+ if (threadStatus != AMEDIA_OK) {
+ requestStop(false /* stopOnSync */);
+ }
+
+ std::scoped_lock lock{mThreadStateMutex};
+
+ // Record the change.
+ mThreadStates[thread] = DONE;
+ if (threadStatus != AMEDIA_OK && mTranscoderStatus == AMEDIA_OK) {
+ mTranscoderStatus = threadStatus;
+ }
+
+ mTranscoderStopped |= threadStopped;
+
+ // Check if all threads are done. Note that if all transcoders have stopped but the sample
+ // writer has not yet started, it never will.
+ bool transcodersDone = true;
+ ThreadState sampleWriterState = PENDING;
+ for (const auto& it : mThreadStates) {
+ LOG(DEBUG) << " Thread " << it.first << " state" << it.second;
+ if (it.first == static_cast<const void*>(mSampleWriter.get())) {
+ sampleWriterState = it.second;
+ } else {
+ transcodersDone &= (it.second == DONE);
+ }
+ }
+ if (!transcodersDone || sampleWriterState == RUNNING) {
return;
}
- bool expected = false;
- if (mCallbackSent.compare_exchange_strong(expected, true)) {
- if (status == AMEDIA_OK) {
- mCallbacks->onFinished(this);
- } else {
- mCallbacks->onError(this, status);
- }
-
- // Transcoding is done and the callback to the client has been sent, so tear down the
- // pipeline but do it asynchronously to avoid deadlocks. If an error occurred, client
- // should clean up the file.
- std::thread asyncCancelThread{[self = shared_from_this()] { self->cancel(); }};
- asyncCancelThread.detach();
+ // All done. Send callback asynchronously and wake up threads waiting in cancel/pause.
+ mThreadsDone = true;
+ if (!mCallbackSent) {
+ std::thread asyncNotificationThread{[this, self = shared_from_this(),
+ status = mTranscoderStatus,
+ stopped = mTranscoderStopped] {
+ // If the transcoder was stopped that means a caller is waiting in stop or pause
+ // in which case we don't send a callback.
+ if (status != AMEDIA_OK) {
+ mCallbacks->onError(this, status);
+ } else if (!stopped) {
+ mCallbacks->onFinished(this);
+ }
+ mThreadsDoneSignal.notify_all();
+ }};
+ asyncNotificationThread.detach();
+ mCallbackSent = true;
}
}
void MediaTranscoder::onTrackFormatAvailable(const MediaTrackTranscoder* transcoder) {
- LOG(INFO) << "TrackTranscoder " << transcoder << " format available.";
+ LOG(DEBUG) << "TrackTranscoder " << transcoder << " format available.";
std::scoped_lock lock{mTracksAddedMutex};
+ const void* sampleWriterPtr = static_cast<const void*>(mSampleWriter.get());
// Ignore duplicate format change.
if (mTracksAdded.count(transcoder) > 0) {
@@ -111,7 +140,7 @@
auto consumer = mSampleWriter->addTrack(transcoder->getOutputFormat());
if (consumer == nullptr) {
LOG(ERROR) << "Unable to add track to sample writer.";
- sendCallback(AMEDIA_ERROR_UNKNOWN);
+ onThreadFinished(sampleWriterPtr, AMEDIA_ERROR_UNKNOWN, false /* stopped */);
return;
}
@@ -119,34 +148,57 @@
mutableTranscoder->setSampleConsumer(consumer);
mTracksAdded.insert(transcoder);
+ bool errorStarting = false;
if (mTracksAdded.size() == mTrackTranscoders.size()) {
// Enable sequential access mode on the sample reader to achieve optimal read performance.
// This has to wait until all tracks have delivered their output formats and the sample
// writer is started. Otherwise the tracks will not get their output sample queues drained
// and the transcoder could hang due to one track running out of buffers and blocking the
// other tracks from reading source samples before they could output their formats.
- mSampleReader->setEnforceSequentialAccess(true);
- LOG(INFO) << "Starting sample writer.";
- bool started = mSampleWriter->start();
- if (!started) {
- LOG(ERROR) << "Unable to start sample writer.";
- sendCallback(AMEDIA_ERROR_UNKNOWN);
+
+ std::scoped_lock lock{mThreadStateMutex};
+ // Don't start the sample writer if a stop already has been requested.
+ if (!mSampleWriterStopped) {
+ if (!mCancelled) {
+ mSampleReader->setEnforceSequentialAccess(true);
+ }
+ LOG(DEBUG) << "Starting sample writer.";
+ errorStarting = !mSampleWriter->start();
+ if (!errorStarting) {
+ mThreadStates[sampleWriterPtr] = RUNNING;
+ }
}
}
+
+ if (errorStarting) {
+ LOG(ERROR) << "Unable to start sample writer.";
+ onThreadFinished(sampleWriterPtr, AMEDIA_ERROR_UNKNOWN, false /* stopped */);
+ }
}
void MediaTranscoder::onTrackFinished(const MediaTrackTranscoder* transcoder) {
LOG(DEBUG) << "TrackTranscoder " << transcoder << " finished";
+ onThreadFinished(static_cast<const void*>(transcoder), AMEDIA_OK, false /* stopped */);
+}
+
+void MediaTranscoder::onTrackStopped(const MediaTrackTranscoder* transcoder) {
+ LOG(DEBUG) << "TrackTranscoder " << transcoder << " stopped";
+ onThreadFinished(static_cast<const void*>(transcoder), AMEDIA_OK, true /* stopped */);
}
void MediaTranscoder::onTrackError(const MediaTrackTranscoder* transcoder, media_status_t status) {
LOG(ERROR) << "TrackTranscoder " << transcoder << " returned error " << status;
- sendCallback(status);
+ onThreadFinished(static_cast<const void*>(transcoder), status, false /* stopped */);
}
-void MediaTranscoder::onFinished(const MediaSampleWriter* writer __unused, media_status_t status) {
- LOG((status != AMEDIA_OK) ? ERROR : DEBUG) << "Sample writer finished with status " << status;
- sendCallback(status);
+void MediaTranscoder::onFinished(const MediaSampleWriter* writer, media_status_t status) {
+ LOG(status == AMEDIA_OK ? DEBUG : ERROR) << "Sample writer finished with status " << status;
+ onThreadFinished(static_cast<const void*>(writer), status, false /* stopped */);
+}
+
+void MediaTranscoder::onStopped(const MediaSampleWriter* writer) {
+ LOG(DEBUG) << "Sample writer " << writer << " stopped";
+ onThreadFinished(static_cast<const void*>(writer), AMEDIA_OK, true /* stopped */);
}
void MediaTranscoder::onProgressUpdate(const MediaSampleWriter* writer __unused, int32_t progress) {
@@ -277,6 +329,9 @@
return status;
}
+ std::scoped_lock lock{mThreadStateMutex};
+ mThreadStates[static_cast<const void*>(transcoder.get())] = PENDING;
+
mTrackTranscoders.emplace_back(std::move(transcoder));
return AMEDIA_OK;
}
@@ -301,6 +356,8 @@
return AMEDIA_ERROR_UNKNOWN;
}
+ std::scoped_lock lock{mThreadStateMutex};
+ mThreadStates[static_cast<const void*>(mSampleWriter.get())] = PENDING;
return AMEDIA_OK;
}
@@ -314,21 +371,75 @@
}
// Start transcoders
- for (auto& transcoder : mTrackTranscoders) {
- bool started = transcoder->start();
- if (!started) {
- LOG(ERROR) << "Unable to start track transcoder.";
- cancel();
- return AMEDIA_ERROR_UNKNOWN;
+ bool started = true;
+ {
+ std::scoped_lock lock{mThreadStateMutex};
+ for (auto& transcoder : mTrackTranscoders) {
+ if (!(started = transcoder->start())) {
+ break;
+ }
+ mThreadStates[static_cast<const void*>(transcoder.get())] = RUNNING;
}
}
+ if (!started) {
+ LOG(ERROR) << "Unable to start track transcoder.";
+ cancel();
+ return AMEDIA_ERROR_UNKNOWN;
+ }
return AMEDIA_OK;
}
+media_status_t MediaTranscoder::requestStop(bool stopOnSync) {
+ std::scoped_lock lock{mThreadStateMutex};
+ if (mCancelled) {
+ LOG(DEBUG) << "MediaTranscoder already cancelled";
+ return AMEDIA_ERROR_UNSUPPORTED;
+ }
+
+ if (!stopOnSync) {
+ mSampleWriterStopped = true;
+ mSampleWriter->stop();
+ }
+
+ mSampleReader->setEnforceSequentialAccess(false);
+ for (auto& transcoder : mTrackTranscoders) {
+ transcoder->stop(stopOnSync);
+ }
+
+ mCancelled = true;
+ return AMEDIA_OK;
+}
+
+void MediaTranscoder::waitForThreads() NO_THREAD_SAFETY_ANALYSIS {
+ std::unique_lock lock{mThreadStateMutex};
+ while (!mThreadsDone) {
+ mThreadsDoneSignal.wait(lock);
+ }
+}
+
media_status_t MediaTranscoder::pause(std::shared_ptr<ndk::ScopedAParcel>* pausedState) {
+ media_status_t status = requestStop(true /* stopOnSync */);
+ if (status != AMEDIA_OK) {
+ return status;
+ }
+
+ waitForThreads();
+
// TODO: write internal states to parcel.
*pausedState = std::shared_ptr<::ndk::ScopedAParcel>(new ::ndk::ScopedAParcel());
- return cancel();
+ return AMEDIA_OK;
+}
+
+media_status_t MediaTranscoder::cancel() {
+ media_status_t status = requestStop(false /* stopOnSync */);
+ if (status != AMEDIA_OK) {
+ return status;
+ }
+
+ waitForThreads();
+
+ // TODO: Release transcoders?
+ return AMEDIA_OK;
}
media_status_t MediaTranscoder::resume() {
@@ -336,20 +447,4 @@
return start();
}
-media_status_t MediaTranscoder::cancel() {
- bool expected = false;
- if (!mCancelled.compare_exchange_strong(expected, true)) {
- // Already cancelled.
- return AMEDIA_OK;
- }
-
- mSampleWriter->stop();
- mSampleReader->setEnforceSequentialAccess(false);
- for (auto& transcoder : mTrackTranscoders) {
- transcoder->stop();
- }
-
- return AMEDIA_OK;
-}
-
} // namespace android
diff --git a/media/libmediatranscoding/transcoder/PassthroughTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/PassthroughTrackTranscoder.cpp
index 35b1d33..c55e244 100644
--- a/media/libmediatranscoding/transcoder/PassthroughTrackTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/PassthroughTrackTranscoder.cpp
@@ -93,9 +93,10 @@
return AMEDIA_OK;
}
-media_status_t PassthroughTrackTranscoder::runTranscodeLoop() {
+media_status_t PassthroughTrackTranscoder::runTranscodeLoop(bool* stopped) {
MediaSampleInfo info;
std::shared_ptr<MediaSample> sample;
+ bool eosReached = false;
// Notify the track format as soon as we start. It's same as the source format.
notifyTrackFormatAvailable();
@@ -106,18 +107,18 @@
};
// Move samples until EOS is reached or transcoding is stopped.
- while (!mStopRequested && !mEosFromSource) {
+ while (mStopRequest != STOP_NOW && !eosReached) {
media_status_t status = mMediaSampleReader->getSampleInfoForTrack(mTrackIndex, &info);
if (status == AMEDIA_OK) {
uint8_t* buffer = mBufferPool->getBufferWithSize(info.size);
if (buffer == nullptr) {
- if (mStopRequested) {
+ if (mStopRequest == STOP_NOW) {
break;
}
LOG(ERROR) << "Unable to get buffer from pool";
- return AMEDIA_ERROR_IO; // TODO: Custom error codes?
+ return AMEDIA_ERROR_UNKNOWN;
}
sample = MediaSample::createWithReleaseCallback(
@@ -131,7 +132,7 @@
} else if (status == AMEDIA_ERROR_END_OF_STREAM) {
sample = std::make_shared<MediaSample>();
- mEosFromSource = true;
+ eosReached = true;
} else {
LOG(ERROR) << "Unable to get next sample info. Aborting transcode.";
return status;
@@ -139,17 +140,22 @@
sample->info = info;
onOutputSampleAvailable(sample);
+
+ if (mStopRequest == STOP_ON_SYNC && info.flags & SAMPLE_FLAG_SYNC_SAMPLE) {
+ break;
+ }
}
- if (mStopRequested && !mEosFromSource) {
- return AMEDIA_ERROR_UNKNOWN; // TODO: Custom error codes?
+ if (mStopRequest != NONE && !eosReached) {
+ *stopped = true;
}
return AMEDIA_OK;
}
void PassthroughTrackTranscoder::abortTranscodeLoop() {
- mStopRequested = true;
- mBufferPool->abort();
+ if (mStopRequest == STOP_NOW) {
+ mBufferPool->abort();
+ }
}
std::shared_ptr<AMediaFormat> PassthroughTrackTranscoder::getOutputFormat() const {
diff --git a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
index c1456fd..5ec5e08 100644
--- a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
@@ -156,11 +156,7 @@
static_cast<VideoTrackTranscoder::CodecWrapper*>(userdata);
if (auto transcoder = wrapper->getTranscoder()) {
transcoder->mCodecMessageQueue.push(
- [transcoder, error] {
- transcoder->mStatus = error;
- transcoder->mStopRequested = true;
- },
- true);
+ [transcoder, error] { transcoder->mStatus = error; }, true);
}
}
};
@@ -406,6 +402,8 @@
sample->info.presentationTimeUs = bufferInfo.presentationTimeUs;
onOutputSampleAvailable(sample);
+
+ mLastSampleWasSync = sample->info.flags & SAMPLE_FLAG_SYNC_SAMPLE;
} else if (bufferIndex == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
AMediaFormat* newFormat = AMediaCodec_getOutputFormat(mEncoder->getCodec());
LOG(DEBUG) << "Encoder output format changed: " << AMediaFormat_toString(newFormat);
@@ -485,7 +483,7 @@
notifyTrackFormatAvailable();
}
-media_status_t VideoTrackTranscoder::runTranscodeLoop() {
+media_status_t VideoTrackTranscoder::runTranscodeLoop(bool* stopped) {
androidSetThreadPriority(0 /* tid (0 = current) */, ANDROID_PRIORITY_VIDEO);
// Push start decoder and encoder as two messages, so that these are subject to the
@@ -509,25 +507,31 @@
});
// Process codec events until EOS is reached, transcoding is stopped or an error occurs.
- while (!mStopRequested && !mEosFromEncoder && mStatus == AMEDIA_OK) {
+ while (mStopRequest != STOP_NOW && !mEosFromEncoder && mStatus == AMEDIA_OK) {
std::function<void()> message = mCodecMessageQueue.pop();
message();
+
+ if (mStopRequest == STOP_ON_SYNC && mLastSampleWasSync) {
+ break;
+ }
}
mCodecMessageQueue.abort();
AMediaCodec_stop(mDecoder);
- // Return error if transcoding was stopped before it finished.
- if (mStopRequested && !mEosFromEncoder && mStatus == AMEDIA_OK) {
- mStatus = AMEDIA_ERROR_UNKNOWN; // TODO: Define custom error codes?
+ // Signal if transcoding was stopped before it finished.
+ if (mStopRequest != NONE && !mEosFromEncoder && mStatus == AMEDIA_OK) {
+ *stopped = true;
}
return mStatus;
}
void VideoTrackTranscoder::abortTranscodeLoop() {
- // Push abort message to the front of the codec event queue.
- mCodecMessageQueue.push([this] { mStopRequested = true; }, true /* front */);
+ if (mStopRequest == STOP_NOW) {
+ // Wake up transcoder thread.
+ mCodecMessageQueue.push([] {}, true /* front */);
+ }
}
std::shared_ptr<AMediaFormat> VideoTrackTranscoder::getOutputFormat() const {
diff --git a/media/libmediatranscoding/transcoder/benchmark/MediaTrackTranscoderBenchmark.cpp b/media/libmediatranscoding/transcoder/benchmark/MediaTrackTranscoderBenchmark.cpp
index aee0ed6..351d80b 100644
--- a/media/libmediatranscoding/transcoder/benchmark/MediaTrackTranscoderBenchmark.cpp
+++ b/media/libmediatranscoding/transcoder/benchmark/MediaTrackTranscoderBenchmark.cpp
@@ -61,6 +61,12 @@
mCondition.notify_all();
}
+ virtual void onTrackStopped(const MediaTrackTranscoder* transcoder __unused) override {
+ std::unique_lock lock(mMutex);
+ mFinished = true;
+ mCondition.notify_all();
+ }
+
virtual void onTrackError(const MediaTrackTranscoder* transcoder __unused,
media_status_t status) override {
std::unique_lock lock(mMutex);
diff --git a/media/libmediatranscoding/transcoder/include/media/MediaSampleWriter.h b/media/libmediatranscoding/transcoder/include/media/MediaSampleWriter.h
index f762556..080f2b7 100644
--- a/media/libmediatranscoding/transcoder/include/media/MediaSampleWriter.h
+++ b/media/libmediatranscoding/transcoder/include/media/MediaSampleWriter.h
@@ -84,6 +84,9 @@
*/
virtual void onFinished(const MediaSampleWriter* writer, media_status_t status) = 0;
+ /** Sample writer was stopped before it was finished. */
+ virtual void onStopped(const MediaSampleWriter* writer) = 0;
+
/** Sample writer progress update in percent. */
virtual void onProgressUpdate(const MediaSampleWriter* writer, int32_t progress) = 0;
@@ -129,15 +132,14 @@
bool start();
/**
- * Stops the sample writer. If the sample writer is not yet finished its operation will be
- * aborted and an error value will be returned to the client in the callback supplied to
- * {@link #start}. If the sample writer has already finished and the client callback has fired
- * the writer has already automatically stopped and there is no need to call stop manually. Once
- * the sample writer has been stopped it cannot be restarted.
- * @return True if the sample writer was successfully stopped on this call. False if the sample
- * writer was already stopped or was never started.
+ * Stops the sample writer. If the sample writer is not yet finished, its operation will be
+ * aborted and the onStopped callback will fire. If the sample writer has already finished and
+ * the onFinished callback has fired the writer has already automatically stopped and there is
+ * no need to call stop manually. Once the sample writer has been stopped it cannot be
+ * restarted. This method is asynchronous and will not wait for the sample writer to stop before
+ * returning.
*/
- bool stop();
+ void stop();
/** Destructor. */
~MediaSampleWriter();
@@ -186,7 +188,6 @@
std::mutex mMutex; // Protects sample queue and state.
std::condition_variable mSampleSignal;
- std::thread mThread;
std::unordered_map<size_t, TrackRecord> mTracks;
std::priority_queue<SampleEntry, std::vector<SampleEntry>, SampleComparator> mSampleQueue
GUARDED_BY(mMutex);
@@ -200,8 +201,8 @@
MediaSampleWriter() : mState(UNINITIALIZED){};
void addSampleToTrack(size_t trackIndex, const std::shared_ptr<MediaSample>& sample);
- media_status_t writeSamples();
- media_status_t runWriterLoop();
+ media_status_t writeSamples(bool* wasStopped);
+ media_status_t runWriterLoop(bool* wasStopped);
};
} // namespace android
diff --git a/media/libmediatranscoding/transcoder/include/media/MediaTrackTranscoder.h b/media/libmediatranscoding/transcoder/include/media/MediaTrackTranscoder.h
index c5e161c..724b919 100644
--- a/media/libmediatranscoding/transcoder/include/media/MediaTrackTranscoder.h
+++ b/media/libmediatranscoding/transcoder/include/media/MediaTrackTranscoder.h
@@ -62,18 +62,21 @@
const std::shared_ptr<AMediaFormat>& destinationFormat);
/**
- * Starts the track transcoder. Once started the track transcoder have to be stopped by calling
- * {@link #stop}, even after completing successfully. Start should only be called once.
+ * Starts the track transcoder. After the track transcoder is successfully started it will run
+ * until a callback signals that transcoding has ended. Start should only be called once.
* @return True if the track transcoder started, or false if it had already been started.
*/
bool start();
/**
* Stops the track transcoder. Once the transcoding has been stopped it cannot be restarted
- * again. It is safe to call stop multiple times.
- * @return True if the track transcoder stopped, or false if it was already stopped.
+ * again. It is safe to call stop multiple times. Stop is an asynchronous operation. Once the
+ * track transcoder has stopped the onTrackStopped callback will get called, unless the
+ * transcoding finished or encountered an error before it could be stopped in which case the
+ * callbacks corresponding to those events will be called instead.
+ * @param stopOnSyncSample Request the transcoder to stop after emitting a sync sample.
*/
- bool stop();
+ void stop(bool stopOnSyncSample = false);
/**
* Set the sample consumer function. The MediaTrackTranscoder will deliver transcoded samples to
@@ -100,7 +103,9 @@
// Called by subclasses when the actual track format becomes available.
void notifyTrackFormatAvailable();
- // Called by subclasses when a transcoded sample is available.
+ // Called by subclasses when a transcoded sample is available. Samples must not hold a strong
+ // reference to the track transcoder in order to avoid retain cycles through the track
+ // transcoder's sample queue.
void onOutputSampleAvailable(const std::shared_ptr<MediaSample>& sample);
// configureDestinationFormat needs to be implemented by subclasses, and gets called on an
@@ -110,7 +115,7 @@
// runTranscodeLoop needs to be implemented by subclasses, and gets called on
// MediaTrackTranscoder's internal thread when the track transcoder is started.
- virtual media_status_t runTranscodeLoop() = 0;
+ virtual media_status_t runTranscodeLoop(bool* stopped) = 0;
// abortTranscodeLoop needs to be implemented by subclasses, and should request transcoding to
// be aborted as soon as possible. It should be safe to call abortTranscodeLoop multiple times.
@@ -120,13 +125,20 @@
int mTrackIndex;
std::shared_ptr<AMediaFormat> mSourceFormat;
+ enum StopRequest {
+ NONE,
+ STOP_NOW,
+ STOP_ON_SYNC,
+ };
+ std::atomic<StopRequest> mStopRequest = NONE;
+
private:
std::mutex mSampleMutex;
+ // SampleQueue for buffering output samples before a sample consumer has been set.
MediaSampleQueue mSampleQueue GUARDED_BY(mSampleMutex);
MediaSampleWriter::MediaSampleConsumerFunction mSampleConsumer GUARDED_BY(mSampleMutex);
const std::weak_ptr<MediaTrackTranscoderCallback> mTranscoderCallback;
std::mutex mStateMutex;
- std::thread mTranscodingThread GUARDED_BY(mStateMutex);
enum {
UNINITIALIZED,
CONFIGURED,
diff --git a/media/libmediatranscoding/transcoder/include/media/MediaTrackTranscoderCallback.h b/media/libmediatranscoding/transcoder/include/media/MediaTrackTranscoderCallback.h
index 654171e..7b62d46 100644
--- a/media/libmediatranscoding/transcoder/include/media/MediaTrackTranscoderCallback.h
+++ b/media/libmediatranscoding/transcoder/include/media/MediaTrackTranscoderCallback.h
@@ -39,6 +39,12 @@
virtual void onTrackFinished(const MediaTrackTranscoder* transcoder);
/**
+ * Called when the MediaTrackTranscoder instance was explicitly stopped before it was finished.
+ * @param transcoder The MediaTrackTranscoder that was stopped.
+ */
+ virtual void onTrackStopped(const MediaTrackTranscoder* transcoder);
+
+ /**
* Called when the MediaTrackTranscoder instance encountered an error it could not recover from.
* @param transcoder The MediaTrackTranscoder that encountered the error.
* @param status The non-zero error code describing the encountered error.
diff --git a/media/libmediatranscoding/transcoder/include/media/MediaTranscoder.h b/media/libmediatranscoding/transcoder/include/media/MediaTranscoder.h
index 4bbb41a..4e11ef5 100644
--- a/media/libmediatranscoding/transcoder/include/media/MediaTranscoder.h
+++ b/media/libmediatranscoding/transcoder/include/media/MediaTranscoder.h
@@ -96,23 +96,25 @@
media_status_t start();
/**
- * Pauses transcoding. The transcoder's paused state is returned through pausedState. The
- * paused state is only needed for resuming transcoding with a new MediaTranscoder instance. The
- * caller can resume transcoding with the current MediaTranscoder instance at any time by
- * calling resume(). It is not required to cancel a paused transcoder. The paused state is
- * independent and the caller can always initialize a new transcoder instance with the same
- * paused state. If the caller wishes to abandon a paused transcoder's operation they can
- * release the transcoder instance, clear the paused state and delete the partial destination
- * file. The caller can optionally call cancel to let the transcoder clean up the partial
- * destination file.
+ * Pauses transcoding and finalizes the partial transcoded file to disk. Pause is a synchronous
+ * operation and will wait until all internal components are done. Once this method returns it
+ * is safe to release the transcoder instance. No callback will be called if the transcoder was
+ * paused successfully. But if the transcoding finishes or encountered an error during pause,
+ * the corresponding callback will be called.
*/
media_status_t pause(std::shared_ptr<ndk::ScopedAParcel>* pausedState);
/** Resumes a paused transcoding. */
media_status_t resume();
- /** Cancels the transcoding. Once canceled the transcoding can not be restarted. Client
- * will be responsible for cleaning up the abandoned file. */
+ /**
+ * Cancels the transcoding. Once canceled the transcoding can not be restarted. Client
+ * will be responsible for cleaning up the abandoned file. Cancel is a synchronous operation and
+ * will wait until all internal components are done. Once this method returns it is safe to
+ * release the transcoder instance. Normally no callback will be called when the transcoder is
+ * cancelled. But if the transcoding finishes or encounters an error during cancel, the
+ * corresponding callback will be called.
+ */
media_status_t cancel();
virtual ~MediaTranscoder() = default;
@@ -123,17 +125,20 @@
// MediaTrackTranscoderCallback
virtual void onTrackFormatAvailable(const MediaTrackTranscoder* transcoder) override;
virtual void onTrackFinished(const MediaTrackTranscoder* transcoder) override;
+ virtual void onTrackStopped(const MediaTrackTranscoder* transcoder) override;
virtual void onTrackError(const MediaTrackTranscoder* transcoder,
media_status_t status) override;
// ~MediaTrackTranscoderCallback
// MediaSampleWriter::CallbackInterface
virtual void onFinished(const MediaSampleWriter* writer, media_status_t status) override;
+ virtual void onStopped(const MediaSampleWriter* writer) override;
virtual void onProgressUpdate(const MediaSampleWriter* writer, int32_t progress) override;
// ~MediaSampleWriter::CallbackInterface
- void onSampleWriterFinished(media_status_t status);
- void sendCallback(media_status_t status);
+ void onThreadFinished(const void* thread, media_status_t threadStatus, bool threadStopped);
+ media_status_t requestStop(bool stopOnSync);
+ void waitForThreads();
std::shared_ptr<CallbackInterface> mCallbacks;
std::shared_ptr<MediaSampleReader> mSampleReader;
@@ -145,7 +150,20 @@
pid_t mPid;
uid_t mUid;
- std::atomic_bool mCallbackSent = false;
+ enum ThreadState {
+ PENDING = 0, // Not yet started.
+ RUNNING, // Currently running.
+ DONE, // Done running (can be finished, stopped or error).
+ };
+ std::mutex mThreadStateMutex;
+ std::condition_variable mThreadsDoneSignal;
+ std::unordered_map<const void*, ThreadState> mThreadStates GUARDED_BY(mThreadStateMutex);
+ media_status_t mTranscoderStatus GUARDED_BY(mThreadStateMutex) = AMEDIA_OK;
+ bool mTranscoderStopped GUARDED_BY(mThreadStateMutex) = false;
+ bool mThreadsDone GUARDED_BY(mThreadStateMutex) = false;
+ bool mCallbackSent GUARDED_BY(mThreadStateMutex) = false;
+ bool mSampleWriterStopped GUARDED_BY(mThreadStateMutex) = false;
+
std::atomic_bool mCancelled = false;
};
diff --git a/media/libmediatranscoding/transcoder/include/media/PassthroughTrackTranscoder.h b/media/libmediatranscoding/transcoder/include/media/PassthroughTrackTranscoder.h
index b9491ed..c074831 100644
--- a/media/libmediatranscoding/transcoder/include/media/PassthroughTrackTranscoder.h
+++ b/media/libmediatranscoding/transcoder/include/media/PassthroughTrackTranscoder.h
@@ -86,7 +86,7 @@
};
// MediaTrackTranscoder
- media_status_t runTranscodeLoop() override;
+ media_status_t runTranscodeLoop(bool* stopped) override;
void abortTranscodeLoop() override;
media_status_t configureDestinationFormat(
const std::shared_ptr<AMediaFormat>& destinationFormat) override;
@@ -94,8 +94,6 @@
// ~MediaTrackTranscoder
std::shared_ptr<BufferPool> mBufferPool;
- bool mEosFromSource = false;
- std::atomic_bool mStopRequested = false;
};
} // namespace android
diff --git a/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h b/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h
index 33ae3ba..d2ffb01 100644
--- a/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h
+++ b/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h
@@ -67,7 +67,7 @@
: MediaTrackTranscoder(transcoderCallback), mPid(pid), mUid(uid){};
// MediaTrackTranscoder
- media_status_t runTranscodeLoop() override;
+ media_status_t runTranscodeLoop(bool* stopped) override;
void abortTranscodeLoop() override;
media_status_t configureDestinationFormat(
const std::shared_ptr<AMediaFormat>& destinationFormat) override;
@@ -91,7 +91,7 @@
ANativeWindow* mSurface = nullptr;
bool mEosFromSource = false;
bool mEosFromEncoder = false;
- bool mStopRequested = false;
+ bool mLastSampleWasSync = false;
media_status_t mStatus = AMEDIA_OK;
MediaSampleInfo mSampleInfo;
BlockingQueue<std::function<void()>> mCodecMessageQueue;
diff --git a/media/libmediatranscoding/transcoder/tests/Android.bp b/media/libmediatranscoding/transcoder/tests/Android.bp
index 7ae6261..d0ea802 100644
--- a/media/libmediatranscoding/transcoder/tests/Android.bp
+++ b/media/libmediatranscoding/transcoder/tests/Android.bp
@@ -1,10 +1,4 @@
// Unit tests for libmediatranscoder.
-
-filegroup {
- name: "test_assets",
- srcs: ["assets/*"],
-}
-
cc_defaults {
name: "testdefaults",
@@ -13,11 +7,16 @@
"libmedia_headers",
],
+ static_libs: [
+ "libmediatranscoder",
+ ],
shared_libs: [
"libbase",
+ "libbinder_ndk",
+ "libcrypto",
"libcutils",
"libmediandk",
- "libmediatranscoder_asan",
+ "libnativewindow",
"libutils",
],
@@ -32,7 +31,6 @@
"signed-integer-overflow",
],
cfi: true,
- address: true,
},
data: [":test_assets"],
@@ -59,7 +57,6 @@
name: "MediaTrackTranscoderTests",
defaults: ["testdefaults"],
srcs: ["MediaTrackTranscoderTests.cpp"],
- shared_libs: ["libbinder_ndk"],
}
// VideoTrackTranscoder unit test
@@ -74,7 +71,6 @@
name: "PassthroughTrackTranscoderTests",
defaults: ["testdefaults"],
srcs: ["PassthroughTrackTranscoderTests.cpp"],
- shared_libs: ["libcrypto"],
}
// MediaSampleWriter unit test
@@ -89,5 +85,4 @@
name: "MediaTranscoderTests",
defaults: ["testdefaults"],
srcs: ["MediaTranscoderTests.cpp"],
- shared_libs: ["libbinder_ndk"],
}
diff --git a/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml b/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml
index a9a7e2e..6d781cd 100644
--- a/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml
+++ b/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml
@@ -17,12 +17,12 @@
<option name="test-suite-tag" value="TranscoderTests" />
<target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
<option name="cleanup" value="false" />
- <option name="push-file"
- key="assets"
- value="/data/local/tmp/TranscodingTestAssets" />
+ <option name="push-file" key="TranscodingTestAssets" value="/data/local/tmp/TranscodingTestAssets" />
+ <option name="push-file" key="{MODULE}" value="/data/local/tmp/{MODULE}" />
</target_preparer>
<test class="com.android.tradefed.testtype.GTest" >
+ <option name="native-test-device-path" value="/data/local/tmp" />
<option name="module-name" value="{MODULE}" />
</test>
</configuration>
diff --git a/media/libmediatranscoding/transcoder/tests/MediaSampleReaderNDKTests.cpp b/media/libmediatranscoding/transcoder/tests/MediaSampleReaderNDKTests.cpp
index 9c9c8b5..11af0b1 100644
--- a/media/libmediatranscoding/transcoder/tests/MediaSampleReaderNDKTests.cpp
+++ b/media/libmediatranscoding/transcoder/tests/MediaSampleReaderNDKTests.cpp
@@ -25,39 +25,166 @@
#include <fcntl.h>
#include <gtest/gtest.h>
#include <media/MediaSampleReaderNDK.h>
+#include <openssl/md5.h>
#include <utils/Timers.h>
#include <cmath>
#include <mutex>
#include <thread>
-// TODO(b/153453392): Test more asset types and validate sample data from readSampleDataForTrack.
-// TODO(b/153453392): Test for sequential and parallel (single thread and multi thread) access.
-// TODO(b/153453392): Test for switching between sequential and parallel access in different points
-// of time.
+// TODO(b/153453392): Test more asset types (frame reordering?).
namespace android {
#define SEC_TO_USEC(s) ((s)*1000 * 1000)
+/** Helper class for comparing sample data using checksums. */
+class Sample {
+public:
+ Sample(uint32_t flags, int64_t timestamp, size_t size, const uint8_t* buffer)
+ : mFlags{flags}, mTimestamp{timestamp}, mSize{size} {
+ initChecksum(buffer);
+ }
+
+ Sample(AMediaExtractor* extractor) {
+ mFlags = AMediaExtractor_getSampleFlags(extractor);
+ mTimestamp = AMediaExtractor_getSampleTime(extractor);
+ mSize = static_cast<size_t>(AMediaExtractor_getSampleSize(extractor));
+
+ auto buffer = std::make_unique<uint8_t[]>(mSize);
+ AMediaExtractor_readSampleData(extractor, buffer.get(), mSize);
+
+ initChecksum(buffer.get());
+ }
+
+ void initChecksum(const uint8_t* buffer) {
+ MD5_CTX md5Ctx;
+ MD5_Init(&md5Ctx);
+ MD5_Update(&md5Ctx, buffer, mSize);
+ MD5_Final(mChecksum, &md5Ctx);
+ }
+
+ bool operator==(const Sample& rhs) const {
+ return mSize == rhs.mSize && mFlags == rhs.mFlags && mTimestamp == rhs.mTimestamp &&
+ memcmp(mChecksum, rhs.mChecksum, MD5_DIGEST_LENGTH) == 0;
+ }
+
+ uint32_t mFlags;
+ int64_t mTimestamp;
+ size_t mSize;
+ uint8_t mChecksum[MD5_DIGEST_LENGTH];
+};
+
+/** Constant for selecting all samples. */
+static constexpr int SAMPLE_COUNT_ALL = -1;
+
+/**
+ * Utility class to test different sample access patterns combined with sequential or parallel
+ * sample access modes.
+ */
+class SampleAccessTester {
+public:
+ SampleAccessTester(int sourceFd, size_t fileSize) {
+ mSampleReader = MediaSampleReaderNDK::createFromFd(sourceFd, 0, fileSize);
+ EXPECT_TRUE(mSampleReader);
+
+ mTrackCount = mSampleReader->getTrackCount();
+
+ for (int trackIndex = 0; trackIndex < mTrackCount; trackIndex++) {
+ EXPECT_EQ(mSampleReader->selectTrack(trackIndex), AMEDIA_OK);
+ }
+
+ mSamples.resize(mTrackCount);
+ mTrackThreads.resize(mTrackCount);
+ }
+
+ void getSampleInfo(int trackIndex) {
+ MediaSampleInfo info;
+ media_status_t status = mSampleReader->getSampleInfoForTrack(trackIndex, &info);
+ EXPECT_EQ(status, AMEDIA_OK);
+ }
+
+ void readSamplesAsync(int trackIndex, int sampleCount) {
+ mTrackThreads[trackIndex] = std::thread{[this, trackIndex, sampleCount] {
+ int samplesRead = 0;
+ MediaSampleInfo info;
+ while (samplesRead < sampleCount || sampleCount == SAMPLE_COUNT_ALL) {
+ media_status_t status = mSampleReader->getSampleInfoForTrack(trackIndex, &info);
+ if (status != AMEDIA_OK) {
+ EXPECT_EQ(status, AMEDIA_ERROR_END_OF_STREAM);
+ EXPECT_TRUE((info.flags & SAMPLE_FLAG_END_OF_STREAM) != 0);
+ break;
+ }
+ ASSERT_TRUE((info.flags & SAMPLE_FLAG_END_OF_STREAM) == 0);
+
+ auto buffer = std::make_unique<uint8_t[]>(info.size);
+ status = mSampleReader->readSampleDataForTrack(trackIndex, buffer.get(), info.size);
+ EXPECT_EQ(status, AMEDIA_OK);
+
+ mSampleMutex.lock();
+ const uint8_t* bufferPtr = buffer.get();
+ mSamples[trackIndex].emplace_back(info.flags, info.presentationTimeUs, info.size,
+ bufferPtr);
+ mSampleMutex.unlock();
+ ++samplesRead;
+ }
+ }};
+ }
+
+ void readSamplesAsync(int sampleCount) {
+ for (int trackIndex = 0; trackIndex < mTrackCount; trackIndex++) {
+ readSamplesAsync(trackIndex, sampleCount);
+ }
+ }
+
+ void waitForTrack(int trackIndex) {
+ ASSERT_TRUE(mTrackThreads[trackIndex].joinable());
+ mTrackThreads[trackIndex].join();
+ }
+
+ void waitForTracks() {
+ for (int trackIndex = 0; trackIndex < mTrackCount; trackIndex++) {
+ waitForTrack(trackIndex);
+ }
+ }
+
+ void setEnforceSequentialAccess(bool enforce) {
+ media_status_t status = mSampleReader->setEnforceSequentialAccess(enforce);
+ EXPECT_EQ(status, AMEDIA_OK);
+ }
+
+ std::vector<std::vector<Sample>>& getSamples() { return mSamples; }
+
+ std::shared_ptr<MediaSampleReader> mSampleReader;
+ size_t mTrackCount;
+ std::mutex mSampleMutex;
+ std::vector<std::thread> mTrackThreads;
+ std::vector<std::vector<Sample>> mSamples;
+};
+
class MediaSampleReaderNDKTests : public ::testing::Test {
public:
MediaSampleReaderNDKTests() { LOG(DEBUG) << "MediaSampleReaderNDKTests created"; }
void SetUp() override {
LOG(DEBUG) << "MediaSampleReaderNDKTests set up";
+
+ // Need to start a thread pool to prevent AMediaExtractor binder calls from starving
+ // (b/155663561).
+ ABinderProcess_startThreadPool();
+
const char* sourcePath =
"/data/local/tmp/TranscodingTestAssets/cubicle_avc_480x240_aac_24KHz.mp4";
- mExtractor = AMediaExtractor_new();
- ASSERT_NE(mExtractor, nullptr);
-
mSourceFd = open(sourcePath, O_RDONLY);
ASSERT_GT(mSourceFd, 0);
mFileSize = lseek(mSourceFd, 0, SEEK_END);
lseek(mSourceFd, 0, SEEK_SET);
+ mExtractor = AMediaExtractor_new();
+ ASSERT_NE(mExtractor, nullptr);
+
media_status_t status =
AMediaExtractor_setDataSourceFd(mExtractor, mSourceFd, 0, mFileSize);
ASSERT_EQ(status, AMEDIA_OK);
@@ -68,14 +195,14 @@
}
}
- void initExtractorTimestamps() {
- // Save all sample timestamps, per track, as reported by the extractor.
- mExtractorTimestamps.resize(mTrackCount);
+ void initExtractorSamples() {
+ if (mExtractorSamples.size() == mTrackCount) return;
+
+ // Save sample information, per track, as reported by the extractor.
+ mExtractorSamples.resize(mTrackCount);
do {
const int trackIndex = AMediaExtractor_getSampleTrackIndex(mExtractor);
- const int64_t sampleTime = AMediaExtractor_getSampleTime(mExtractor);
-
- mExtractorTimestamps[trackIndex].push_back(sampleTime);
+ mExtractorSamples[trackIndex].emplace_back(mExtractor);
} while (AMediaExtractor_advance(mExtractor));
AMediaExtractor_seekTo(mExtractor, 0, AMEDIAEXTRACTOR_SEEK_PREVIOUS_SYNC);
@@ -104,6 +231,22 @@
return bitrates;
}
+ void compareSamples(std::vector<std::vector<Sample>>& readerSamples) {
+ initExtractorSamples();
+ EXPECT_EQ(readerSamples.size(), mTrackCount);
+
+ for (int trackIndex = 0; trackIndex < mTrackCount; trackIndex++) {
+ LOG(DEBUG) << "Track " << trackIndex << ", comparing "
+ << readerSamples[trackIndex].size() << " samples.";
+ EXPECT_EQ(readerSamples[trackIndex].size(), mExtractorSamples[trackIndex].size());
+ for (size_t sampleIndex = 0; sampleIndex < readerSamples[trackIndex].size();
+ sampleIndex++) {
+ EXPECT_EQ(readerSamples[trackIndex][sampleIndex],
+ mExtractorSamples[trackIndex][sampleIndex]);
+ }
+ }
+ }
+
void TearDown() override {
LOG(DEBUG) << "MediaSampleReaderNDKTests tear down";
AMediaExtractor_delete(mExtractor);
@@ -116,58 +259,91 @@
size_t mTrackCount;
int mSourceFd;
size_t mFileSize;
- std::vector<std::vector<int64_t>> mExtractorTimestamps;
+ std::vector<std::vector<Sample>> mExtractorSamples;
};
-TEST_F(MediaSampleReaderNDKTests, TestSampleTimes) {
- LOG(DEBUG) << "TestSampleTimes Starts";
+/** Reads all samples from all tracks in parallel. */
+TEST_F(MediaSampleReaderNDKTests, TestParallelSampleAccess) {
+ LOG(DEBUG) << "TestParallelSampleAccess Starts";
- std::shared_ptr<MediaSampleReader> sampleReader =
- MediaSampleReaderNDK::createFromFd(mSourceFd, 0, mFileSize);
- ASSERT_TRUE(sampleReader);
+ SampleAccessTester tester{mSourceFd, mFileSize};
+ tester.readSamplesAsync(SAMPLE_COUNT_ALL);
+ tester.waitForTracks();
+ compareSamples(tester.getSamples());
+}
- for (int trackIndex = 0; trackIndex < mTrackCount; trackIndex++) {
- EXPECT_EQ(sampleReader->selectTrack(trackIndex), AMEDIA_OK);
- }
+/** Reads all samples from all tracks sequentially. */
+TEST_F(MediaSampleReaderNDKTests, TestSequentialSampleAccess) {
+ LOG(DEBUG) << "TestSequentialSampleAccess Starts";
- // Initialize the extractor timestamps.
- initExtractorTimestamps();
+ SampleAccessTester tester{mSourceFd, mFileSize};
+ tester.setEnforceSequentialAccess(true);
+ tester.readSamplesAsync(SAMPLE_COUNT_ALL);
+ tester.waitForTracks();
+ compareSamples(tester.getSamples());
+}
- std::mutex timestampMutex;
- std::vector<std::thread> trackThreads;
- std::vector<std::vector<int64_t>> readerTimestamps(mTrackCount);
+/** Reads all samples from one track in parallel mode before switching to sequential mode. */
+TEST_F(MediaSampleReaderNDKTests, TestMixedSampleAccessTrackEOS) {
+ LOG(DEBUG) << "TestMixedSampleAccessTrackEOS Starts";
- for (int trackIndex = 0; trackIndex < mTrackCount; trackIndex++) {
- trackThreads.emplace_back([sampleReader, trackIndex, ×tampMutex, &readerTimestamps] {
- MediaSampleInfo info;
- while (true) {
- media_status_t status = sampleReader->getSampleInfoForTrack(trackIndex, &info);
- if (status != AMEDIA_OK) {
- EXPECT_EQ(status, AMEDIA_ERROR_END_OF_STREAM);
- EXPECT_TRUE((info.flags & SAMPLE_FLAG_END_OF_STREAM) != 0);
- break;
- }
- ASSERT_TRUE((info.flags & SAMPLE_FLAG_END_OF_STREAM) == 0);
- timestampMutex.lock();
- readerTimestamps[trackIndex].push_back(info.presentationTimeUs);
- timestampMutex.unlock();
- sampleReader->advanceTrack(trackIndex);
+ for (int readSampleInfoFlag = 0; readSampleInfoFlag <= 1; readSampleInfoFlag++) {
+ for (int trackIndToEOS = 0; trackIndToEOS < mTrackCount; ++trackIndToEOS) {
+ LOG(DEBUG) << "Testing EOS of track " << trackIndToEOS;
+
+ SampleAccessTester tester{mSourceFd, mFileSize};
+
+ // If the flag is set, read sample info from a different track before draining the track
+ // under test to force the reader to save the extractor position.
+ if (readSampleInfoFlag) {
+ tester.getSampleInfo((trackIndToEOS + 1) % mTrackCount);
}
- });
- }
- for (auto& thread : trackThreads) {
- thread.join();
- }
+ // Read all samples from one track before enabling sequential access
+ tester.readSamplesAsync(trackIndToEOS, SAMPLE_COUNT_ALL);
+ tester.waitForTrack(trackIndToEOS);
+ tester.setEnforceSequentialAccess(true);
- for (int trackIndex = 0; trackIndex < mTrackCount; trackIndex++) {
- LOG(DEBUG) << "Track " << trackIndex << ", comparing "
- << readerTimestamps[trackIndex].size() << " samples.";
- EXPECT_EQ(readerTimestamps[trackIndex].size(), mExtractorTimestamps[trackIndex].size());
- for (size_t sampleIndex = 0; sampleIndex < readerTimestamps[trackIndex].size();
- sampleIndex++) {
- EXPECT_EQ(readerTimestamps[trackIndex][sampleIndex],
- mExtractorTimestamps[trackIndex][sampleIndex]);
+ for (int trackIndex = 0; trackIndex < mTrackCount; ++trackIndex) {
+ if (trackIndex == trackIndToEOS) continue;
+
+ tester.readSamplesAsync(trackIndex, SAMPLE_COUNT_ALL);
+ tester.waitForTrack(trackIndex);
+ }
+
+ compareSamples(tester.getSamples());
+ }
+ }
+}
+
+/**
+ * Reads different combinations of sample counts from all tracks in parallel mode before switching
+ * to sequential mode and reading the rest of the samples.
+ */
+TEST_F(MediaSampleReaderNDKTests, TestMixedSampleAccess) {
+ LOG(DEBUG) << "TestMixedSampleAccess Starts";
+ initExtractorSamples();
+
+ for (int trackIndToTest = 0; trackIndToTest < mTrackCount; ++trackIndToTest) {
+        for (int sampleCount = 0;
+             sampleCount <= static_cast<int>(mExtractorSamples[trackIndToTest].size() + 1);
+ ++sampleCount) {
+ SampleAccessTester tester{mSourceFd, mFileSize};
+
+ for (int trackIndex = 0; trackIndex < mTrackCount; ++trackIndex) {
+ if (trackIndex == trackIndToTest) {
+ tester.readSamplesAsync(trackIndex, sampleCount);
+ } else {
+ tester.readSamplesAsync(trackIndex, mExtractorSamples[trackIndex].size() / 2);
+ }
+ }
+
+ tester.waitForTracks();
+ tester.setEnforceSequentialAccess(true);
+
+ tester.readSamplesAsync(SAMPLE_COUNT_ALL);
+ tester.waitForTracks();
+
+ compareSamples(tester.getSamples());
}
}
}
diff --git a/media/libmediatranscoding/transcoder/tests/MediaSampleWriterTests.cpp b/media/libmediatranscoding/transcoder/tests/MediaSampleWriterTests.cpp
index 46f3e9b..0a41b00 100644
--- a/media/libmediatranscoding/transcoder/tests/MediaSampleWriterTests.cpp
+++ b/media/libmediatranscoding/transcoder/tests/MediaSampleWriterTests.cpp
@@ -179,8 +179,6 @@
class TestCallbacks : public MediaSampleWriter::CallbackInterface {
public:
- TestCallbacks(bool expectSuccess = true) : mExpectSuccess(expectSuccess) {}
-
bool hasFinished() {
std::unique_lock<std::mutex> lock(mMutex);
return mFinished;
@@ -191,12 +189,15 @@
media_status_t status) override {
std::unique_lock<std::mutex> lock(mMutex);
EXPECT_FALSE(mFinished);
- if (mExpectSuccess) {
- EXPECT_EQ(status, AMEDIA_OK);
- } else {
- EXPECT_NE(status, AMEDIA_OK);
- }
mFinished = true;
+ mStatus = status;
+ mCondition.notify_all();
+ }
+
+ virtual void onStopped(const MediaSampleWriter* writer __unused) {
+ std::unique_lock<std::mutex> lock(mMutex);
+ EXPECT_FALSE(mFinished);
+ mStopped = true;
mCondition.notify_all();
}
@@ -213,18 +214,20 @@
void waitForWritingFinished() {
std::unique_lock<std::mutex> lock(mMutex);
- while (!mFinished) {
+ while (!mFinished && !mStopped) {
mCondition.wait(lock);
}
}
uint32_t getProgressUpdateCount() const { return mProgressUpdateCount; }
+ bool wasStopped() const { return mStopped; }
private:
std::mutex mMutex;
std::condition_variable mCondition;
bool mFinished = false;
- bool mExpectSuccess;
+ bool mStopped = false;
+ media_status_t mStatus = AMEDIA_OK;
int32_t mLastProgress = -1;
uint32_t mProgressUpdateCount = 0;
};
@@ -316,8 +319,7 @@
TEST_F(MediaSampleWriterTests, TestDoubleStartStop) {
std::shared_ptr<MediaSampleWriter> writer = MediaSampleWriter::Create();
- std::shared_ptr<TestCallbacks> callbacks =
- std::make_shared<TestCallbacks>(false /* expectSuccess */);
+ std::shared_ptr<TestCallbacks> callbacks = std::make_shared<TestCallbacks>();
EXPECT_TRUE(writer->init(mTestMuxer, callbacks));
const TestMediaSource& mediaSource = getMediaSource();
@@ -327,9 +329,10 @@
ASSERT_TRUE(writer->start());
EXPECT_FALSE(writer->start());
- EXPECT_TRUE(writer->stop());
- EXPECT_TRUE(callbacks->hasFinished());
- EXPECT_FALSE(writer->stop());
+ writer->stop();
+ writer->stop();
+ callbacks->waitForWritingFinished();
+ EXPECT_TRUE(callbacks->wasStopped());
}
TEST_F(MediaSampleWriterTests, TestStopWithoutStart) {
@@ -340,7 +343,7 @@
EXPECT_NE(writer->addTrack(mediaSource.mTrackFormats[0]), nullptr);
EXPECT_EQ(mTestMuxer->popEvent(), TestMuxer::AddTrack(mediaSource.mTrackFormats[0].get()));
- EXPECT_FALSE(writer->stop());
+ writer->stop();
EXPECT_EQ(mTestMuxer->popEvent(), TestMuxer::NoEvent);
}
@@ -468,7 +471,6 @@
}
EXPECT_EQ(mTestMuxer->popEvent(), TestMuxer::Stop());
- EXPECT_TRUE(writer->stop());
EXPECT_TRUE(mTestCallbacks->hasFinished());
}
@@ -541,7 +543,6 @@
// Wait for writer.
mTestCallbacks->waitForWritingFinished();
- EXPECT_TRUE(writer->stop());
// Compare output file with source.
mediaSource.reset();
diff --git a/media/libmediatranscoding/transcoder/tests/MediaTrackTranscoderTests.cpp b/media/libmediatranscoding/transcoder/tests/MediaTrackTranscoderTests.cpp
index 83f0a4a..21f0b86 100644
--- a/media/libmediatranscoding/transcoder/tests/MediaTrackTranscoderTests.cpp
+++ b/media/libmediatranscoding/transcoder/tests/MediaTrackTranscoderTests.cpp
@@ -61,13 +61,10 @@
}
ASSERT_NE(mTranscoder, nullptr);
- initSampleReader();
+ initSampleReader("/data/local/tmp/TranscodingTestAssets/cubicle_avc_480x240_aac_24KHz.mp4");
}
- void initSampleReader() {
- const char* sourcePath =
- "/data/local/tmp/TranscodingTestAssets/cubicle_avc_480x240_aac_24KHz.mp4";
-
+ void initSampleReader(const char* sourcePath) {
const int sourceFd = open(sourcePath, O_RDONLY);
ASSERT_GT(sourceFd, 0);
@@ -157,16 +154,23 @@
ASSERT_TRUE(mTranscoder->start());
drainOutputSamples();
EXPECT_EQ(mCallback->waitUntilFinished(), AMEDIA_OK);
- EXPECT_TRUE(mTranscoder->stop());
+ EXPECT_TRUE(mCallback->transcodingFinished());
EXPECT_TRUE(mGotEndOfStream);
}
TEST_P(MediaTrackTranscoderTests, StopNormalOperation) {
LOG(DEBUG) << "Testing StopNormalOperation";
+
+ // Use a longer test asset to make sure that transcoding can be stopped.
+ initSampleReader("/data/local/tmp/TranscodingTestAssets/longtest_15s.mp4");
+
EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
AMEDIA_OK);
EXPECT_TRUE(mTranscoder->start());
- EXPECT_TRUE(mTranscoder->stop());
+ mCallback->waitUntilTrackFormatAvailable();
+ mTranscoder->stop();
+ EXPECT_EQ(mCallback->waitUntilFinished(), AMEDIA_OK);
+ EXPECT_TRUE(mCallback->transcodingWasStopped());
}
TEST_P(MediaTrackTranscoderTests, StartWithoutConfigure) {
@@ -178,17 +182,23 @@
LOG(DEBUG) << "Testing StopWithoutStart";
EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
AMEDIA_OK);
- EXPECT_FALSE(mTranscoder->stop());
+ mTranscoder->stop();
}
TEST_P(MediaTrackTranscoderTests, DoubleStartStop) {
LOG(DEBUG) << "Testing DoubleStartStop";
+
+ // Use a longer test asset to make sure that transcoding can be stopped.
+ initSampleReader("/data/local/tmp/TranscodingTestAssets/longtest_15s.mp4");
+
EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
AMEDIA_OK);
EXPECT_TRUE(mTranscoder->start());
EXPECT_FALSE(mTranscoder->start());
- EXPECT_TRUE(mTranscoder->stop());
- EXPECT_FALSE(mTranscoder->stop());
+ mTranscoder->stop();
+ mTranscoder->stop();
+ EXPECT_EQ(mCallback->waitUntilFinished(), AMEDIA_OK);
+ EXPECT_TRUE(mCallback->transcodingWasStopped());
}
TEST_P(MediaTrackTranscoderTests, DoubleConfigure) {
@@ -212,7 +222,8 @@
EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
AMEDIA_OK);
EXPECT_TRUE(mTranscoder->start());
- EXPECT_TRUE(mTranscoder->stop());
+ mTranscoder->stop();
+ EXPECT_EQ(mCallback->waitUntilFinished(), AMEDIA_OK);
EXPECT_FALSE(mTranscoder->start());
}
@@ -223,7 +234,7 @@
ASSERT_TRUE(mTranscoder->start());
drainOutputSamples();
EXPECT_EQ(mCallback->waitUntilFinished(), AMEDIA_OK);
- EXPECT_TRUE(mTranscoder->stop());
+ mTranscoder->stop();
EXPECT_FALSE(mTranscoder->start());
EXPECT_TRUE(mGotEndOfStream);
}
@@ -235,7 +246,7 @@
ASSERT_TRUE(mTranscoder->start());
drainOutputSamples(1 /* numSamplesToSave */);
EXPECT_EQ(mCallback->waitUntilFinished(), AMEDIA_OK);
- EXPECT_TRUE(mTranscoder->stop());
+ mTranscoder->stop();
EXPECT_TRUE(mGotEndOfStream);
mTranscoder.reset();
@@ -251,7 +262,8 @@
ASSERT_TRUE(mTranscoder->start());
drainOutputSamples(1 /* numSamplesToSave */);
mSamplesSavedSemaphore.wait();
- EXPECT_TRUE(mTranscoder->stop());
+ mTranscoder->stop();
+ EXPECT_EQ(mCallback->waitUntilFinished(), AMEDIA_OK);
std::this_thread::sleep_for(std::chrono::milliseconds(20));
mSavedSamples.clear();
@@ -272,6 +284,44 @@
AMEDIA_OK);
}
+TEST_P(MediaTrackTranscoderTests, StopOnSync) {
+ LOG(DEBUG) << "Testing StopOnSync";
+
+ // Use a longer test asset to make sure there is a GOP to finish.
+ initSampleReader("/data/local/tmp/TranscodingTestAssets/longtest_15s.mp4");
+
+ EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
+ AMEDIA_OK);
+
+ bool lastSampleWasEos = false;
+ bool lastRealSampleWasSync = false;
+ OneShotSemaphore samplesReceivedSemaphore;
+ uint32_t sampleCount = 0;
+
+ mTranscoder->setSampleConsumer([&](const std::shared_ptr<MediaSample>& sample) {
+ ASSERT_NE(sample, nullptr);
+
+ if ((lastSampleWasEos = sample->info.flags & SAMPLE_FLAG_END_OF_STREAM)) {
+ samplesReceivedSemaphore.signal();
+ return;
+ }
+ lastRealSampleWasSync = sample->info.flags & SAMPLE_FLAG_SYNC_SAMPLE;
+
+ if (++sampleCount >= 10) { // Wait for a few samples before stopping.
+ samplesReceivedSemaphore.signal();
+ }
+ });
+
+ ASSERT_TRUE(mTranscoder->start());
+ samplesReceivedSemaphore.wait();
+ mTranscoder->stop(true /* stopOnSync */);
+ EXPECT_EQ(mCallback->waitUntilFinished(), AMEDIA_OK);
+
+ EXPECT_TRUE(lastSampleWasEos);
+ EXPECT_TRUE(lastRealSampleWasSync);
+ EXPECT_TRUE(mCallback->transcodingWasStopped());
+}
+
}; // namespace android
using namespace android;
diff --git a/media/libmediatranscoding/transcoder/tests/MediaTranscoderTests.cpp b/media/libmediatranscoding/transcoder/tests/MediaTranscoderTests.cpp
index f813a5c..5c59992 100644
--- a/media/libmediatranscoding/transcoder/tests/MediaTranscoderTests.cpp
+++ b/media/libmediatranscoding/transcoder/tests/MediaTranscoderTests.cpp
@@ -99,11 +99,11 @@
}
}
media_status_t mStatus = AMEDIA_OK;
+ bool mFinished = false;
private:
std::mutex mMutex;
std::condition_variable mCondition;
- bool mFinished = false;
bool mProgressMade = false;
};
@@ -145,6 +145,8 @@
kRunToCompletion,
kCancelAfterProgress,
kCancelAfterStart,
+ kPauseAfterProgress,
+ kPauseAfterStart,
} TranscodeExecutionControl;
using FormatConfigurationCallback = std::function<AMediaFormat*(AMediaFormat*)>;
@@ -181,7 +183,10 @@
media_status_t startStatus = transcoder->start();
EXPECT_EQ(startStatus, AMEDIA_OK);
+
if (startStatus == AMEDIA_OK) {
+ std::shared_ptr<ndk::ScopedAParcel> pausedState;
+
switch (executionControl) {
case kCancelAfterProgress:
mCallbacks->waitForProgressMade();
@@ -189,6 +194,12 @@
case kCancelAfterStart:
transcoder->cancel();
break;
+ case kPauseAfterProgress:
+ mCallbacks->waitForProgressMade();
+ FALLTHROUGH_INTENDED;
+ case kPauseAfterStart:
+ transcoder->pause(&pausedState);
+ break;
case kRunToCompletion:
default:
mCallbacks->waitForTranscodingFinished();
@@ -326,8 +337,9 @@
const char* destPath = "/data/local/tmp/MediaTranscoder_PreserveBitrate.MP4";
testTranscodeVideo(srcPath, destPath, AMEDIA_MIMETYPE_VIDEO_AVC);
- // Require maximum of 10% difference in file size.
- EXPECT_LT(getFileSizeDiffPercent(srcPath, destPath, true /* absolute*/), 10);
+ // Require maximum of 25% difference in file size.
+ // TODO(b/174678336): Find a better test asset to tighten the threshold.
+ EXPECT_LT(getFileSizeDiffPercent(srcPath, destPath, true /* absolute*/), 25);
}
TEST_F(MediaTranscoderTests, TestCustomBitrate) {
@@ -339,8 +351,9 @@
testTranscodeVideo(srcPath, destPath2, AMEDIA_MIMETYPE_VIDEO_AVC, 8 * 1000 * 1000);
// The source asset is very short and heavily compressed from the beginning so don't expect the
- // requested bitrate to be exactly matched. However 40% difference seems reasonable.
- EXPECT_GT(getFileSizeDiffPercent(destPath1, destPath2), 40);
+ // requested bitrate to be exactly matched. However the 8mbps should at least be larger.
+ // TODO(b/174678336): Find a better test asset to tighten the threshold.
+ EXPECT_GT(getFileSizeDiffPercent(destPath1, destPath2), 10);
}
static AMediaFormat* getAVCVideoFormat(AMediaFormat* sourceFormat) {
@@ -360,9 +373,10 @@
const char* srcPath = "/data/local/tmp/TranscodingTestAssets/longtest_15s.mp4";
const char* destPath = "/data/local/tmp/MediaTranscoder_Cancel.MP4";
- for (int i = 0; i < 32; ++i) {
+ for (int i = 0; i < 20; ++i) {
EXPECT_EQ(transcodeHelper(srcPath, destPath, getAVCVideoFormat, kCancelAfterProgress),
AMEDIA_OK);
+ EXPECT_FALSE(mCallbacks->mFinished);
mCallbacks = std::make_shared<TestCallbacks>();
}
}
@@ -371,9 +385,34 @@
const char* srcPath = "/data/local/tmp/TranscodingTestAssets/longtest_15s.mp4";
const char* destPath = "/data/local/tmp/MediaTranscoder_Cancel.MP4";
- for (int i = 0; i < 32; ++i) {
+ for (int i = 0; i < 20; ++i) {
EXPECT_EQ(transcodeHelper(srcPath, destPath, getAVCVideoFormat, kCancelAfterStart),
AMEDIA_OK);
+ EXPECT_FALSE(mCallbacks->mFinished);
+ mCallbacks = std::make_shared<TestCallbacks>();
+ }
+}
+
+TEST_F(MediaTranscoderTests, TestPauseAfterProgress) {
+ const char* srcPath = "/data/local/tmp/TranscodingTestAssets/longtest_15s.mp4";
+ const char* destPath = "/data/local/tmp/MediaTranscoder_Pause.MP4";
+
+ for (int i = 0; i < 20; ++i) {
+ EXPECT_EQ(transcodeHelper(srcPath, destPath, getAVCVideoFormat, kPauseAfterProgress),
+ AMEDIA_OK);
+ EXPECT_FALSE(mCallbacks->mFinished);
+ mCallbacks = std::make_shared<TestCallbacks>();
+ }
+}
+
+TEST_F(MediaTranscoderTests, TestPauseAfterStart) {
+ const char* srcPath = "/data/local/tmp/TranscodingTestAssets/longtest_15s.mp4";
+ const char* destPath = "/data/local/tmp/MediaTranscoder_Pause.MP4";
+
+ for (int i = 0; i < 20; ++i) {
+ EXPECT_EQ(transcodeHelper(srcPath, destPath, getAVCVideoFormat, kPauseAfterStart),
+ AMEDIA_OK);
+ EXPECT_FALSE(mCallbacks->mFinished);
mCallbacks = std::make_shared<TestCallbacks>();
}
}
diff --git a/media/libmediatranscoding/transcoder/tests/PassthroughTrackTranscoderTests.cpp b/media/libmediatranscoding/transcoder/tests/PassthroughTrackTranscoderTests.cpp
index 9713e17..5071efd 100644
--- a/media/libmediatranscoding/transcoder/tests/PassthroughTrackTranscoderTests.cpp
+++ b/media/libmediatranscoding/transcoder/tests/PassthroughTrackTranscoderTests.cpp
@@ -183,7 +183,6 @@
callback->waitUntilFinished();
EXPECT_EQ(sampleCount, sampleChecksums.size());
- EXPECT_TRUE(transcoder.stop());
}
/** Class for testing PassthroughTrackTranscoder's built in buffer pool. */
diff --git a/media/libmediatranscoding/transcoder/tests/TrackTranscoderTestUtils.h b/media/libmediatranscoding/transcoder/tests/TrackTranscoderTestUtils.h
index 8d05353..a782f71 100644
--- a/media/libmediatranscoding/transcoder/tests/TrackTranscoderTestUtils.h
+++ b/media/libmediatranscoding/transcoder/tests/TrackTranscoderTestUtils.h
@@ -33,20 +33,14 @@
AMediaFormat* sourceFormat, bool includeBitrate = true) {
// Default video destination format setup.
static constexpr float kFrameRate = 30.0f;
- static constexpr float kIFrameInterval = 30.0f;
static constexpr int32_t kBitRate = 2 * 1000 * 1000;
- static constexpr int32_t kColorFormatSurface = 0x7f000789;
AMediaFormat* destinationFormat = AMediaFormat_new();
AMediaFormat_copy(destinationFormat, sourceFormat);
AMediaFormat_setFloat(destinationFormat, AMEDIAFORMAT_KEY_FRAME_RATE, kFrameRate);
- AMediaFormat_setFloat(destinationFormat, AMEDIAFORMAT_KEY_I_FRAME_INTERVAL,
- kIFrameInterval);
if (includeBitrate) {
AMediaFormat_setInt32(destinationFormat, AMEDIAFORMAT_KEY_BIT_RATE, kBitRate);
}
- AMediaFormat_setInt32(destinationFormat, AMEDIAFORMAT_KEY_COLOR_FORMAT,
- kColorFormatSurface);
return std::shared_ptr<AMediaFormat>(destinationFormat, &AMediaFormat_delete);
}
@@ -70,6 +64,13 @@
mTranscodingFinishedCondition.notify_all();
}
+ virtual void onTrackStopped(const MediaTrackTranscoder* transcoder __unused) override {
+ std::unique_lock<std::mutex> lock(mMutex);
+ mTranscodingFinished = true;
+ mTranscodingStopped = true;
+ mTranscodingFinishedCondition.notify_all();
+ }
+
void onTrackError(const MediaTrackTranscoder* transcoder __unused, media_status_t status) {
std::unique_lock<std::mutex> lock(mMutex);
mTranscodingFinished = true;
@@ -93,12 +94,18 @@
}
}
+ bool transcodingWasStopped() const { return mTranscodingFinished && mTranscodingStopped; }
+ bool transcodingFinished() const {
+ return mTranscodingFinished && !mTranscodingStopped && mStatus == AMEDIA_OK;
+ }
+
private:
media_status_t mStatus = AMEDIA_OK;
std::mutex mMutex;
std::condition_variable mTranscodingFinishedCondition;
std::condition_variable mTrackFormatAvailableCondition;
bool mTranscodingFinished = false;
+ bool mTranscodingStopped = false;
bool mTrackFormatAvailable = false;
};
diff --git a/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp b/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp
index 1b5bd13..4ede97f 100644
--- a/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp
+++ b/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp
@@ -135,7 +135,6 @@
});
EXPECT_EQ(callback->waitUntilFinished(), AMEDIA_OK);
- EXPECT_TRUE(transcoder->stop());
}
TEST_F(VideoTrackTranscoderTests, PreserveBitrate) {
@@ -160,7 +159,8 @@
auto outputFormat = transcoder->getOutputFormat();
ASSERT_NE(outputFormat, nullptr);
- ASSERT_TRUE(transcoder->stop());
+ transcoder->stop();
+ EXPECT_EQ(callback->waitUntilFinished(), AMEDIA_OK);
int32_t outBitrate;
EXPECT_TRUE(AMediaFormat_getInt32(outputFormat.get(), AMEDIAFORMAT_KEY_BIT_RATE, &outBitrate));
@@ -205,7 +205,8 @@
// Wait for the encoder to output samples before stopping and releasing the transcoder.
semaphore.wait();
- EXPECT_TRUE(transcoder->stop());
+ transcoder->stop();
+ EXPECT_EQ(callback->waitUntilFinished(), AMEDIA_OK);
transcoder.reset();
// Return buffers to the codec so that it can resume processing, but keep one buffer to avoid
diff --git a/media/libmediatranscoding/transcoder/tests/build_and_run_all_unit_tests.sh b/media/libmediatranscoding/transcoder/tests/build_and_run_all_unit_tests.sh
index b848b4c..792c541 100755
--- a/media/libmediatranscoding/transcoder/tests/build_and_run_all_unit_tests.sh
+++ b/media/libmediatranscoding/transcoder/tests/build_and_run_all_unit_tests.sh
@@ -20,7 +20,7 @@
fi
# Push the files onto the device.
-. $ANDROID_BUILD_TOP/frameworks/av/media/libmediatranscoding/tests/assets/push_assets.sh
+. $ANDROID_BUILD_TOP/frameworks/av/media/libmediatranscoding/tests/push_assets.sh
echo "========================================"
diff --git a/media/libshmem/Android.bp b/media/libshmem/Android.bp
index 0e4ff48..62784ed 100644
--- a/media/libshmem/Android.bp
+++ b/media/libshmem/Android.bp
@@ -28,6 +28,11 @@
"libutils",
"shared-file-region-aidl-unstable-cpp",
],
+ target: {
+ darwin: {
+ enabled: false,
+ },
+ },
}
cc_library {
@@ -43,6 +48,11 @@
export_shared_lib_headers: [
"shared-file-region-aidl-unstable-cpp",
],
+ target: {
+ darwin: {
+ enabled: false,
+ },
+ },
}
cc_test {
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 44ee2ac..71c1b0b 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -5350,6 +5350,34 @@
if (mChannelMaskPresent) {
notify->setInt32("channel-mask", mChannelMask);
}
+
+ if (!mIsEncoder && portIndex == kPortIndexOutput) {
+ AString mime;
+ if (mConfigFormat->findString("mime", &mime)
+ && !strcasecmp(MEDIA_MIMETYPE_AUDIO_AAC, mime.c_str())) {
+
+ OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE presentation;
+ InitOMXParams(&presentation);
+ err = mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacDrcPresentation,
+ &presentation, sizeof(presentation));
+ if (err != OK) {
+ return err;
+ }
+ notify->setInt32("aac-encoded-target-level",
+ presentation.nEncodedTargetLevel);
+ notify->setInt32("aac-drc-cut-level", presentation.nDrcCut);
+ notify->setInt32("aac-drc-boost-level", presentation.nDrcBoost);
+ notify->setInt32("aac-drc-heavy-compression",
+ presentation.nHeavyCompression);
+ notify->setInt32("aac-target-ref-level",
+ presentation.nTargetReferenceLevel);
+ notify->setInt32("aac-drc-effect-type", presentation.nDrcEffectType);
+ notify->setInt32("aac-drc-album-mode", presentation.nDrcAlbumMode);
+ notify->setInt32("aac-drc-output-loudness",
+ presentation.nDrcOutputLoudness);
+ }
+ }
break;
}
@@ -7810,6 +7838,58 @@
// Ignore errors as failure is expected for codecs that aren't video encoders.
(void)configureTemporalLayers(params, false /* inConfigure */, mOutputFormat);
+ AString mime;
+ if (!mIsEncoder
+ && (mConfigFormat->findString("mime", &mime))
+ && !strcasecmp(MEDIA_MIMETYPE_AUDIO_AAC, mime.c_str())) {
+ OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE presentation;
+ InitOMXParams(&presentation);
+ mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacDrcPresentation,
+ &presentation, sizeof(presentation));
+ int32_t value32 = 0;
+ bool updated = false;
+ if (params->findInt32("aac-pcm-limiter-enable", &value32)) {
+ presentation.nPCMLimiterEnable = value32;
+ updated = true;
+ }
+ if (params->findInt32("aac-encoded-target-level", &value32)) {
+ presentation.nEncodedTargetLevel = value32;
+ updated = true;
+ }
+ if (params->findInt32("aac-drc-cut-level", &value32)) {
+ presentation.nDrcCut = value32;
+ updated = true;
+ }
+ if (params->findInt32("aac-drc-boost-level", &value32)) {
+ presentation.nDrcBoost = value32;
+ updated = true;
+ }
+ if (params->findInt32("aac-drc-heavy-compression", &value32)) {
+ presentation.nHeavyCompression = value32;
+ updated = true;
+ }
+ if (params->findInt32("aac-target-ref-level", &value32)) {
+ presentation.nTargetReferenceLevel = value32;
+ updated = true;
+ }
+ if (params->findInt32("aac-drc-effect-type", &value32)) {
+ presentation.nDrcEffectType = value32;
+ updated = true;
+ }
+ if (params->findInt32("aac-drc-album-mode", &value32)) {
+ presentation.nDrcAlbumMode = value32;
+ updated = true;
+ }
+            if (params->findInt32("aac-drc-output-loudness", &value32)) {
+ presentation.nDrcOutputLoudness = value32;
+ updated = true;
+ }
+ if (updated) {
+ mOMXNode->setParameter((OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacDrcPresentation,
+ &presentation, sizeof(presentation));
+ }
+ }
return setVendorParameters(params);
}
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 2aeddd7..28a7a1e 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -38,6 +38,7 @@
#define DRC_DEFAULT_MOBILE_DRC_HEAVY 1 /* switch for heavy compression for mobile conf */
#define DRC_DEFAULT_MOBILE_DRC_EFFECT 3 /* MPEG-D DRC effect type; 3 => Limited playback range */
#define DRC_DEFAULT_MOBILE_DRC_ALBUM 0 /* MPEG-D DRC album mode; 0 => album mode is disabled, 1 => album mode is enabled */
+#define DRC_DEFAULT_MOBILE_OUTPUT_LOUDNESS -1 /* decoder output loudness; -1 => the value is unknown, otherwise dB step value (e.g. 64 for -16 dB) */
#define DRC_DEFAULT_MOBILE_ENC_LEVEL (-1) /* encoder target level; -1 => the value is unknown, otherwise dB step value (e.g. 64 for -16 dB) */
#define MAX_CHANNEL_COUNT 8 /* maximum number of audio channels that can be decoded */
// names of properties that can be used to override the default DRC settings
@@ -230,6 +231,15 @@
// For seven and eight channel input streams, enable 6.1 and 7.1 channel output
aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, -1);
+ mDrcCompressMode = DRC_DEFAULT_MOBILE_DRC_HEAVY;
+ mDrcTargetRefLevel = DRC_DEFAULT_MOBILE_REF_LEVEL;
+ mDrcEncTargetLevel = DRC_DEFAULT_MOBILE_ENC_LEVEL;
+ mDrcBoostFactor = DRC_DEFAULT_MOBILE_DRC_BOOST;
+ mDrcAttenuationFactor = DRC_DEFAULT_MOBILE_DRC_CUT;
+ mDrcEffectType = DRC_DEFAULT_MOBILE_DRC_EFFECT;
+ mDrcAlbumMode = DRC_DEFAULT_MOBILE_DRC_ALBUM;
+ mDrcOutputLoudness = DRC_DEFAULT_MOBILE_OUTPUT_LOUDNESS;
+
return status;
}
@@ -358,6 +368,27 @@
return OMX_ErrorNone;
}
+ case OMX_IndexParamAudioAndroidAacDrcPresentation:
+ {
+ OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE *aacPresParams =
+ (OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE *)params;
+
+ ALOGD("get OMX_IndexParamAudioAndroidAacDrcPresentation");
+
+ if (!isValidOMXParam(aacPresParams)) {
+ return OMX_ErrorBadParameter;
+ }
+ aacPresParams->nDrcEffectType = mDrcEffectType;
+ aacPresParams->nDrcAlbumMode = mDrcAlbumMode;
+ aacPresParams->nDrcBoost = mDrcBoostFactor;
+ aacPresParams->nDrcCut = mDrcAttenuationFactor;
+ aacPresParams->nHeavyCompression = mDrcCompressMode;
+ aacPresParams->nTargetReferenceLevel = mDrcTargetRefLevel;
+ aacPresParams->nEncodedTargetLevel = mDrcEncTargetLevel;
+ aacPresParams->nDrcOutputLoudness = mDrcOutputLoudness;
+ return OMX_ErrorNone;
+ }
+
default:
return SimpleSoftOMXComponent::internalGetParameter(index, params);
}
@@ -464,11 +495,13 @@
if (aacPresParams->nDrcEffectType >= -1) {
ALOGV("set nDrcEffectType=%d", aacPresParams->nDrcEffectType);
aacDecoder_SetParam(mAACDecoder, AAC_UNIDRC_SET_EFFECT, aacPresParams->nDrcEffectType);
+ mDrcEffectType = aacPresParams->nDrcEffectType;
}
if (aacPresParams->nDrcAlbumMode >= -1) {
ALOGV("set nDrcAlbumMode=%d", aacPresParams->nDrcAlbumMode);
aacDecoder_SetParam(mAACDecoder, AAC_UNIDRC_ALBUM_MODE,
aacPresParams->nDrcAlbumMode);
+ mDrcAlbumMode = aacPresParams->nDrcAlbumMode;
}
bool updateDrcWrapper = false;
if (aacPresParams->nDrcBoost >= 0) {
@@ -476,34 +509,42 @@
mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_BOOST_FACTOR,
aacPresParams->nDrcBoost);
updateDrcWrapper = true;
+ mDrcBoostFactor = aacPresParams->nDrcBoost;
}
if (aacPresParams->nDrcCut >= 0) {
ALOGV("set nDrcCut=%d", aacPresParams->nDrcCut);
mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_ATT_FACTOR, aacPresParams->nDrcCut);
updateDrcWrapper = true;
+ mDrcAttenuationFactor = aacPresParams->nDrcCut;
}
if (aacPresParams->nHeavyCompression >= 0) {
ALOGV("set nHeavyCompression=%d", aacPresParams->nHeavyCompression);
mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_HEAVY,
aacPresParams->nHeavyCompression);
updateDrcWrapper = true;
+ mDrcCompressMode = aacPresParams->nHeavyCompression;
}
if (aacPresParams->nTargetReferenceLevel >= -1) {
ALOGV("set nTargetReferenceLevel=%d", aacPresParams->nTargetReferenceLevel);
mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_TARGET,
aacPresParams->nTargetReferenceLevel);
updateDrcWrapper = true;
+ mDrcTargetRefLevel = aacPresParams->nTargetReferenceLevel;
}
if (aacPresParams->nEncodedTargetLevel >= 0) {
ALOGV("set nEncodedTargetLevel=%d", aacPresParams->nEncodedTargetLevel);
mDrcWrap.setParam(DRC_PRES_MODE_WRAP_ENCODER_TARGET,
aacPresParams->nEncodedTargetLevel);
updateDrcWrapper = true;
+ mDrcEncTargetLevel = aacPresParams->nEncodedTargetLevel;
}
if (aacPresParams->nPCMLimiterEnable >= 0) {
aacDecoder_SetParam(mAACDecoder, AAC_PCM_LIMITER_ENABLE,
(aacPresParams->nPCMLimiterEnable != 0));
}
+ if (aacPresParams->nDrcOutputLoudness != DRC_DEFAULT_MOBILE_OUTPUT_LOUDNESS) {
+ mDrcOutputLoudness = aacPresParams->nDrcOutputLoudness;
+ }
if (updateDrcWrapper) {
mDrcWrap.update();
}
@@ -854,6 +895,11 @@
// fall through
}
+ if (mDrcOutputLoudness != mStreamInfo->outputLoudness) {
+ ALOGD("update Loudness, before = %d, now = %d", mDrcOutputLoudness, mStreamInfo->outputLoudness);
+ mDrcOutputLoudness = mStreamInfo->outputLoudness;
+ }
+
/*
* AAC+/eAAC+ streams can be signalled in two ways: either explicitly
* or implicitly, according to MPEG4 spec. AAC+/eAAC+ is a dual
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.h b/media/libstagefright/codecs/aacdec/SoftAAC2.h
index 5bee710..9f98aa1 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.h
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.h
@@ -85,6 +85,17 @@
int32_t mOutputDelayRingBufferWritePos;
int32_t mOutputDelayRingBufferReadPos;
int32_t mOutputDelayRingBufferFilled;
+
+ //drc
+ int32_t mDrcCompressMode;
+ int32_t mDrcTargetRefLevel;
+ int32_t mDrcEncTargetLevel;
+ int32_t mDrcBoostFactor;
+ int32_t mDrcAttenuationFactor;
+ int32_t mDrcEffectType;
+ int32_t mDrcAlbumMode;
+ int32_t mDrcOutputLoudness;
+
bool outputDelayRingBufferPutSamples(INT_PCM *samples, int numSamples);
int32_t outputDelayRingBufferGetSamples(INT_PCM *samples, int numSamples);
int32_t outputDelayRingBufferSamplesAvailable();
diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
index ddb459f..44415aa 100644
--- a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
@@ -17,6 +17,10 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "SimpleSoftOMXComponent"
#include <utils/Log.h>
+#include <OMX_Core.h>
+#include <OMX_Audio.h>
+#include <OMX_IndexExt.h>
+#include <OMX_AudioExt.h>
#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -74,7 +78,7 @@
OMX_U32 portIndex;
- switch (index) {
+ switch ((int)index) {
case OMX_IndexParamPortDefinition:
{
const OMX_PARAM_PORTDEFINITIONTYPE *portDefs =
@@ -108,6 +112,19 @@
break;
}
+ case OMX_IndexParamAudioAndroidAacDrcPresentation:
+ {
+ if (mState == OMX_StateInvalid) {
+ return false;
+ }
+ const OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE *aacPresParams =
+ (const OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE *)params;
+ if (!isValidOMXParam(aacPresParams)) {
+ return false;
+ }
+ return true;
+ }
+
default:
return false;
}
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 9ba99bc..6d96a8d 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -31,6 +31,7 @@
#include <sys/resource.h>
#include <thread>
+
#include <android/os/IExternalVibratorService.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
@@ -41,8 +42,10 @@
#include <media/audiohal/DevicesFactoryHalInterface.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
#include <media/AudioParameter.h>
+#include <media/IAudioPolicyService.h>
#include <media/MediaMetricsItem.h>
#include <media/TypeConverter.h>
+#include <mediautils/TimeCheck.h>
#include <memunreachable/memunreachable.h>
#include <utils/String16.h>
#include <utils/threads.h>
@@ -69,6 +72,7 @@
#include <media/IMediaLogService.h>
#include <media/AidlConversion.h>
+#include <media/AudioValidator.h>
#include <media/nbaio/Pipe.h>
#include <media/nbaio/PipeReader.h>
#include <mediautils/BatteryNotifier.h>
@@ -181,9 +185,15 @@
// ----------------------------------------------------------------------------
+void AudioFlinger::instantiate() {
+ sp<IServiceManager> sm(defaultServiceManager());
+ sm->addService(String16(IAudioFlinger::DEFAULT_SERVICE_NAME),
+ new AudioFlingerServerAdapter(new AudioFlinger()), false,
+ IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT);
+}
+
AudioFlinger::AudioFlinger()
- : BnAudioFlinger(),
- mMediaLogNotifier(new AudioFlinger::MediaLogNotifier()),
+ : mMediaLogNotifier(new AudioFlinger::MediaLogNotifier()),
mPrimaryHardwareDev(NULL),
mAudioHwDevs(NULL),
mHardwareStatus(AUDIO_HW_IDLE),
@@ -2335,6 +2345,11 @@
{
ALOGV(__func__);
+ status_t status = AudioValidator::validateAudioPortConfig(*config);
+ if (status != NO_ERROR) {
+ return status;
+ }
+
audio_module_handle_t module;
if (config->type == AUDIO_PORT_TYPE_DEVICE) {
module = config->ext.device.hw_module;
@@ -4033,10 +4048,108 @@
// ----------------------------------------------------------------------------
-status_t AudioFlinger::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+status_t AudioFlinger::onPreTransact(
+ TransactionCode code, const Parcel& /* data */, uint32_t /* flags */)
{
- return BnAudioFlinger::onTransact(code, data, reply, flags);
+ // make sure transactions reserved to AudioPolicyManager do not come from other processes
+ switch (code) {
+ case TransactionCode::SET_STREAM_VOLUME:
+ case TransactionCode::SET_STREAM_MUTE:
+ case TransactionCode::OPEN_OUTPUT:
+ case TransactionCode::OPEN_DUPLICATE_OUTPUT:
+ case TransactionCode::CLOSE_OUTPUT:
+ case TransactionCode::SUSPEND_OUTPUT:
+ case TransactionCode::RESTORE_OUTPUT:
+ case TransactionCode::OPEN_INPUT:
+ case TransactionCode::CLOSE_INPUT:
+ case TransactionCode::INVALIDATE_STREAM:
+ case TransactionCode::SET_VOICE_VOLUME:
+ case TransactionCode::MOVE_EFFECTS:
+ case TransactionCode::SET_EFFECT_SUSPENDED:
+ case TransactionCode::LOAD_HW_MODULE:
+ case TransactionCode::GET_AUDIO_PORT:
+ case TransactionCode::CREATE_AUDIO_PATCH:
+ case TransactionCode::RELEASE_AUDIO_PATCH:
+ case TransactionCode::LIST_AUDIO_PATCHES:
+ case TransactionCode::SET_AUDIO_PORT_CONFIG:
+ case TransactionCode::SET_RECORD_SILENCED:
+ ALOGW("%s: transaction %d received from PID %d",
+ __func__, code, IPCThreadState::self()->getCallingPid());
+ // return status only for non void methods
+ switch (code) {
+ case TransactionCode::SET_RECORD_SILENCED:
+ case TransactionCode::SET_EFFECT_SUSPENDED:
+ break;
+ default:
+ return INVALID_OPERATION;
+ }
+ return OK;
+ default:
+ break;
+ }
+
+ // make sure the following transactions come from system components
+ switch (code) {
+ case TransactionCode::SET_MASTER_VOLUME:
+ case TransactionCode::SET_MASTER_MUTE:
+ case TransactionCode::SET_MODE:
+ case TransactionCode::SET_MIC_MUTE:
+ case TransactionCode::SET_LOW_RAM_DEVICE:
+ case TransactionCode::SYSTEM_READY:
+ case TransactionCode::SET_AUDIO_HAL_PIDS: {
+ if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
+ ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
+ __func__, code, IPCThreadState::self()->getCallingPid(),
+ IPCThreadState::self()->getCallingUid());
+ // return status only for non void methods
+ switch (code) {
+ case TransactionCode::SYSTEM_READY:
+ break;
+ default:
+ return INVALID_OPERATION;
+ }
+ return OK;
+ }
+ } break;
+ default:
+ break;
+ }
+
+ // List of relevant events that trigger log merging.
+ // Log merging should activate during audio activity of any kind. These are considered the
+ // most relevant events.
+ // TODO should select more wisely the items from the list
+ switch (code) {
+ case TransactionCode::CREATE_TRACK:
+ case TransactionCode::CREATE_RECORD:
+ case TransactionCode::SET_MASTER_VOLUME:
+ case TransactionCode::SET_MASTER_MUTE:
+ case TransactionCode::SET_MIC_MUTE:
+ case TransactionCode::SET_PARAMETERS:
+ case TransactionCode::CREATE_EFFECT:
+ case TransactionCode::SYSTEM_READY: {
+ requestLogMerge();
+ break;
+ }
+ default:
+ break;
+ }
+
+ std::string tag("IAudioFlinger command " +
+ std::to_string(static_cast<std::underlying_type_t<TransactionCode>>(code)));
+ TimeCheck check(tag.c_str());
+
+ // Make sure we connect to Audio Policy Service before calling into AudioFlinger:
+ // - AudioFlinger can call into Audio Policy Service with its global mutex held
+ // - If this is the first time Audio Policy Service is queried from inside audioserver process
+ // this will trigger Audio Policy Manager initialization.
+ // - Audio Policy Manager initialization calls into AudioFlinger which will try to lock
+ // its global mutex and a deadlock will occur.
+ if (IPCThreadState::self()->getCallingPid() != getpid()) {
+ AudioSystem::get_audio_policy_service();
+ }
+
+ return OK;
}
} // namespace android
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 6dfc48f..1cf1e67 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -123,16 +123,12 @@
#define INCLUDING_FROM_AUDIOFLINGER_H
-class AudioFlinger :
- public BinderService<AudioFlinger>,
- public BnAudioFlinger
+class AudioFlinger : public AudioFlingerServerAdapter::Delegate
{
- friend class BinderService<AudioFlinger>; // for AudioFlinger()
-
public:
- static const char* getServiceName() ANDROID_API { return "media.audio_flinger"; }
+ static void instantiate() ANDROID_API;
- virtual status_t dump(int fd, const Vector<String16>& args);
+ status_t dump(int fd, const Vector<String16>& args) override;
// IAudioFlinger interface, in binder opcode order
status_t createTrack(const media::CreateTrackRequest& input,
@@ -270,11 +266,7 @@
virtual status_t setAudioHalPids(const std::vector<pid_t>& pids);
- virtual status_t onTransact(
- uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags);
+ status_t onPreTransact(TransactionCode code, const Parcel& data, uint32_t flags) override;
// end of IAudioFlinger interface
@@ -519,6 +511,7 @@
const sp<MediaLogNotifier> mMediaLogNotifier;
// This is a helper that is called during incoming binder calls.
+ // Requests media.log to start merging log buffers
void requestLogMerge();
class TrackHandle;
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index b956b96..1e11660 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -25,6 +25,7 @@
#include "AudioFlinger.h"
#include <media/AudioParameter.h>
+#include <media/AudioValidator.h>
#include <media/DeviceDescriptorBase.h>
#include <media/PatchBuilder.h>
#include <mediautils/ServiceUtilities.h>
@@ -56,6 +57,11 @@
/* Get supported attributes for a given audio port */
status_t AudioFlinger::getAudioPort(struct audio_port_v7 *port) {
+ status_t status = AudioValidator::validateAudioPort(*port);
+ if (status != NO_ERROR) {
+ return status;
+ }
+
Mutex::Autolock _l(mLock);
return mPatchPanel.getAudioPort(port);
}
@@ -64,6 +70,11 @@
status_t AudioFlinger::createAudioPatch(const struct audio_patch *patch,
audio_patch_handle_t *handle)
{
+ status_t status = AudioValidator::validateAudioPatch(*patch);
+ if (status != NO_ERROR) {
+ return status;
+ }
+
Mutex::Autolock _l(mLock);
return mPatchPanel.createAudioPatch(patch, handle);
}
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index b13b7be..ab2bc32 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -8684,6 +8684,7 @@
void AudioFlinger::RecordThread::updateOutDevices(const DeviceDescriptorBaseVector& outDevices)
{
+ Mutex::Autolock _l(mLock);
mOutDevices = outDevices;
mOutDeviceTypeAddrs = deviceTypeAddrsFromDescriptors(mOutDevices);
for (size_t i = 0; i < mEffectChains.size(); i++) {
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 1d9223e..1302486 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -182,6 +182,7 @@
* Active ref count of the client will be incremented/decremented through setActive API
*/
virtual void setClientActive(const sp<TrackClientDescriptor>& client, bool active);
+ bool isClientActive(const sp<TrackClientDescriptor>& client);
bool isActive(uint32_t inPastMs) const;
bool isActive(VolumeSource volumeSource = VOLUME_SOURCE_NONE,
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 25f7c27..1756021 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -123,6 +123,12 @@
client->setActive(active);
}
+bool AudioOutputDescriptor::isClientActive(const sp<TrackClientDescriptor>& client)
+{
+ return client != nullptr &&
+ std::find(begin(mActiveClients), end(mActiveClients), client) != end(mActiveClients);
+}
+
bool AudioOutputDescriptor::isActive(VolumeSource vs, uint32_t inPastMs, nsecs_t sysTime) const
{
return (vs == VOLUME_SOURCE_NONE) ?
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 7b8a2ea..cc4ec36 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -1954,6 +1954,12 @@
ALOGV("releaseOutput() %d", outputDesc->mIoHandle);
+ sp<TrackClientDescriptor> client = outputDesc->getClient(portId);
+ if (outputDesc->isClientActive(client)) {
+ ALOGW("releaseOutput() inactivates portId %d in good faith", portId);
+ stopOutput(portId);
+ }
+
if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
if (outputDesc->mDirectOpenCount <= 0) {
ALOGW("releaseOutput() invalid open count %d for output %d",
@@ -1965,9 +1971,7 @@
mpClientInterface->onAudioPortListUpdate();
}
}
- // stopOutput() needs to be successfully called before releaseOutput()
- // otherwise there may be inaccurate stream reference counts.
- // This is checked in outputDesc->removeClient below.
+
outputDesc->removeClient(portId);
}
diff --git a/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp b/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
index 81d7bf9..1bc2081 100644
--- a/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
+++ b/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
@@ -168,6 +168,19 @@
entry = request->find(ANDROID_CONTROL_ZOOM_RATIO);
if (entry.count == 1 && entry.data.f[0] != 1.0f) {
zoomRatioIs1 = false;
+
+ // If cropRegion is windowboxing, override it with activeArray
+ camera_metadata_entry_t cropRegionEntry = request->find(ANDROID_SCALER_CROP_REGION);
+ if (cropRegionEntry.count == 4) {
+ int cropWidth = cropRegionEntry.data.i32[2];
+ int cropHeight = cropRegionEntry.data.i32[3];
+ if (cropWidth < mArrayWidth && cropHeight < mArrayHeight) {
+ cropRegionEntry.data.i32[0] = 0;
+ cropRegionEntry.data.i32[1] = 0;
+ cropRegionEntry.data.i32[2] = mArrayWidth;
+ cropRegionEntry.data.i32[3] = mArrayHeight;
+ }
+ }
}
if (mHalSupportsZoomRatio && zoomRatioIs1) {
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 32ac583..289cffd 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -22,6 +22,7 @@
#include <android/binder_manager.h>
#include <android/binder_process.h>
#include <binder/IMediaResourceMonitor.h>
+#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <cutils/sched_policy.h>
#include <dirent.h>
@@ -96,7 +97,7 @@
service->overridePid(mPid, -1);
// thiz is freed in the call below, so it must be last call referring thiz
- service->removeResource(mPid, mClientId, false);
+ service->removeResource(mPid, mClientId, false /*checkValid*/);
}
class OverrideProcessInfoDeathNotifier : public DeathNotifier {
@@ -422,8 +423,12 @@
Mutex::Autolock lock(mLock);
if (!mProcessInfo->isValidPid(pid)) {
- ALOGE("Rejected addResource call with invalid pid.");
- return Status::fromServiceSpecificError(BAD_VALUE);
+ pid_t callingPid = IPCThreadState::self()->getCallingPid();
+ uid_t callingUid = IPCThreadState::self()->getCallingUid();
+ ALOGW("%s called with untrusted pid %d, using calling pid %d, uid %d", __FUNCTION__,
+ pid, callingPid, callingUid);
+ pid = callingPid;
+ uid = callingUid;
}
ResourceInfos& infos = getResourceInfosForEdit(pid, mMap);
ResourceInfo& info = getResourceInfoForEdit(uid, clientId, client, infos);
@@ -477,8 +482,10 @@
Mutex::Autolock lock(mLock);
if (!mProcessInfo->isValidPid(pid)) {
- ALOGE("Rejected removeResource call with invalid pid.");
- return Status::fromServiceSpecificError(BAD_VALUE);
+ pid_t callingPid = IPCThreadState::self()->getCallingPid();
+ ALOGW("%s called with untrusted pid %d, using calling pid %d", __FUNCTION__,
+ pid, callingPid);
+ pid = callingPid;
}
ssize_t index = mMap.indexOfKey(pid);
if (index < 0) {
@@ -531,7 +538,7 @@
}
Status ResourceManagerService::removeClient(int32_t pid, int64_t clientId) {
- removeResource(pid, clientId, true);
+ removeResource(pid, clientId, true /*checkValid*/);
return Status::ok();
}
@@ -543,8 +550,10 @@
Mutex::Autolock lock(mLock);
if (checkValid && !mProcessInfo->isValidPid(pid)) {
- ALOGE("Rejected removeResource call with invalid pid.");
- return Status::fromServiceSpecificError(BAD_VALUE);
+ pid_t callingPid = IPCThreadState::self()->getCallingPid();
+ ALOGW("%s called with untrusted pid %d, using calling pid %d", __FUNCTION__,
+ pid, callingPid);
+ pid = callingPid;
}
ssize_t index = mMap.indexOfKey(pid);
if (index < 0) {
@@ -599,8 +608,10 @@
{
Mutex::Autolock lock(mLock);
if (!mProcessInfo->isValidPid(callingPid)) {
- ALOGE("Rejected reclaimResource call with invalid callingPid.");
- return Status::fromServiceSpecificError(BAD_VALUE);
+ pid_t actualCallingPid = IPCThreadState::self()->getCallingPid();
+ ALOGW("%s called with untrusted pid %d, using actual calling pid %d", __FUNCTION__,
+ callingPid, actualCallingPid);
+ callingPid = actualCallingPid;
}
const MediaResourceParcel *secureCodec = NULL;
const MediaResourceParcel *nonSecureCodec = NULL;
@@ -836,8 +847,10 @@
Mutex::Autolock lock(mLock);
if (!mProcessInfo->isValidPid(pid)) {
- ALOGE("Rejected markClientForPendingRemoval call with invalid pid.");
- return Status::fromServiceSpecificError(BAD_VALUE);
+ pid_t callingPid = IPCThreadState::self()->getCallingPid();
+ ALOGW("%s called with untrusted pid %d, using calling pid %d", __FUNCTION__,
+ pid, callingPid);
+ pid = callingPid;
}
ssize_t index = mMap.indexOfKey(pid);
if (index < 0) {
@@ -866,8 +879,10 @@
{
Mutex::Autolock lock(mLock);
if (!mProcessInfo->isValidPid(pid)) {
- ALOGE("Rejected reclaimResourcesFromClientsPendingRemoval call with invalid pid.");
- return Status::fromServiceSpecificError(BAD_VALUE);
+ pid_t callingPid = IPCThreadState::self()->getCallingPid();
+ ALOGW("%s called with untrusted pid %d, using calling pid %d", __FUNCTION__,
+ pid, callingPid);
+ pid = callingPid;
}
for (MediaResource::Type type : {MediaResource::Type::kSecureCodec,
diff --git a/services/mediatranscoding/tests/build_and_run_all_unit_tests.sh b/services/mediatranscoding/tests/build_and_run_all_unit_tests.sh
index 1b42a22..edf6778 100755
--- a/services/mediatranscoding/tests/build_and_run_all_unit_tests.sh
+++ b/services/mediatranscoding/tests/build_and_run_all_unit_tests.sh
@@ -14,7 +14,7 @@
mm
# Push the files onto the device.
-. $ANDROID_BUILD_TOP/frameworks/av/media/libmediatranscoding/tests/assets/push_assets.sh
+. $ANDROID_BUILD_TOP/frameworks/av/media/libmediatranscoding/tests/push_assets.sh
echo "[==========] installing test apps"
adb root