Merge "Revert "refactor AudioRecord and AudioFlinger openRecord()""
diff --git a/camera/Android.bp b/camera/Android.bp
index c76ae50..24b3918 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -29,12 +29,7 @@
// AIDL files for camera interfaces
// The headers for these interfaces will be available to any modules that
// include libcamera_client, at the path "aidl/package/path/BnFoo.h"
- "aidl/android/hardware/ICameraService.aidl",
- "aidl/android/hardware/ICameraServiceListener.aidl",
- "aidl/android/hardware/ICameraServiceProxy.aidl",
- "aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl",
- "aidl/android/hardware/camera2/ICameraDeviceUser.aidl",
-
+ ":libcamera_client_aidl",
// Source for camera interface parcelables, and manually-written interfaces
"Camera.cpp",
@@ -81,3 +76,25 @@
],
}
+
+// AIDL interface between camera clients and the camera service.
+filegroup {
+ name: "libcamera_client_aidl",
+ srcs: [
+ "aidl/android/hardware/ICameraService.aidl",
+ "aidl/android/hardware/ICameraServiceListener.aidl",
+ "aidl/android/hardware/ICameraServiceProxy.aidl",
+ "aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl",
+ "aidl/android/hardware/camera2/ICameraDeviceUser.aidl",
+ ],
+}
+
+// Extra AIDL files that are used by framework.jar but not libcamera_client
+// because they have hand-written native implementations.
+filegroup {
+ name: "libcamera_client_framework_aidl",
+ srcs: [
+ "aidl/android/hardware/ICamera.aidl",
+ "aidl/android/hardware/ICameraClient.aidl",
+ ],
+}
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 1793877..4f893f1 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -1635,6 +1635,28 @@
*/
ACAMERA_CONTROL_ENABLE_ZSL = // byte (acamera_metadata_enum_android_control_enable_zsl_t)
ACAMERA_CONTROL_START + 41,
+ /**
+ * <p>Whether a significant scene change is detected within the currently-set AF
+ * region(s).</p>
+ *
+ * <p>Type: int32 (acamera_metadata_enum_android_control_af_scene_change_t)</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+ * </ul></p>
+ *
+ * <p>When the camera focus routine detects a change in the scene it is looking at,
+ * such as a large shift in camera viewpoint, significant motion in the scene, or a
+ * significant illumination change, this value will be set to DETECTED for a single capture
+ * result. Otherwise the value will be NOT_DETECTED. The threshold for detection is similar
+ * to what would trigger a new passive focus scan to begin in CONTINUOUS autofocus modes.</p>
+ * <p>afSceneChange may be DETECTED only if afMode is AF_MODE_CONTINUOUS_VIDEO or
+ * AF_MODE_CONTINUOUS_PICTURE. In other AF modes, afSceneChange must be NOT_DETECTED.</p>
+ * <p>This key will be available if the camera device advertises this key via {@link ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS }.</p>
+ */
+ ACAMERA_CONTROL_AF_SCENE_CHANGE = // int32 (acamera_metadata_enum_android_control_af_scene_change_t)
+ ACAMERA_CONTROL_START + 42,
ACAMERA_CONTROL_END,
/**
@@ -6115,6 +6137,20 @@
} acamera_metadata_enum_android_control_enable_zsl_t;
+// ACAMERA_CONTROL_AF_SCENE_CHANGE
+typedef enum acamera_metadata_enum_acamera_control_af_scene_change {
+ /**
+ * <p>Scene change is not detected within the AF region(s).</p>
+ */
+ ACAMERA_CONTROL_AF_SCENE_CHANGE_NOT_DETECTED = 0,
+
+ /**
+ * <p>Scene change is detected within the AF region(s).</p>
+ */
+ ACAMERA_CONTROL_AF_SCENE_CHANGE_DETECTED = 1,
+
+} acamera_metadata_enum_android_control_af_scene_change_t;
+
// ACAMERA_EDGE_MODE
diff --git a/drm/mediadrm/plugins/clearkey/ClearKeyDrmProperties.h b/drm/mediadrm/plugins/clearkey/ClearKeyDrmProperties.h
new file mode 100644
index 0000000..a99e174
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/ClearKeyDrmProperties.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_DRM_PROPERTIES_H_
+#define CLEARKEY_DRM_PROPERTIES_H_
+
+#include <utils/String8.h>
+
+namespace clearkeydrm {
+
+static const android::String8 kVendorKey("vendor");
+static const android::String8 kVendorValue("Google");
+static const android::String8 kVersionKey("version");
+static const android::String8 kVersionValue("1.0");
+static const android::String8 kPluginDescriptionKey("description");
+static const android::String8 kPluginDescriptionValue("ClearKey CDM");
+static const android::String8 kAlgorithmsKey("algorithms");
+static const android::String8 kAlgorithmsValue("");
+static const android::String8 kListenerTestSupportKey("listenerTestSupport");
+static const android::String8 kListenerTestSupportValue("true");
+
+static const android::String8 kDeviceIdKey("deviceId");
+static const uint8_t kTestDeviceIdData[] =
+ {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+ 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
+} // namespace clearkeydrm
+
+#endif // CLEARKEY_DRM_PROPERTIES_H_
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
index ec07d87..7c43994 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
@@ -22,7 +22,7 @@
#include <utils/StrongPointer.h>
#include "DrmPlugin.h"
-
+#include "ClearKeyDrmProperties.h"
#include "Session.h"
namespace {
@@ -44,7 +44,22 @@
DrmPlugin::DrmPlugin(SessionLibrary* sessionLibrary)
: mSessionLibrary(sessionLibrary) {
+
mPlayPolicy.clear();
+ initProperties();
+}
+
+void DrmPlugin::initProperties() {
+ mStringProperties.clear();
+ mStringProperties.add(kVendorKey, kVendorValue);
+ mStringProperties.add(kVersionKey, kVersionValue);
+ mStringProperties.add(kPluginDescriptionKey, kPluginDescriptionValue);
+ mStringProperties.add(kAlgorithmsKey, kAlgorithmsValue);
+ mStringProperties.add(kListenerTestSupportKey, kListenerTestSupportValue);
+
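+ // Immutable test device ID; setPropertyByteArray() below rejects attempts to change kDeviceIdKey.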
+ Vector<uint8_t> testDeviceId;
+ testDeviceId.appendArray(kTestDeviceIdData, sizeof(kTestDeviceIdData) / sizeof(uint8_t));
+ mByteArrayProperties.add(kDeviceIdKey, testDeviceId);
}
status_t DrmPlugin::openSession(Vector<uint8_t>& sessionId) {
@@ -122,21 +137,57 @@
return res;
}
+status_t DrmPlugin::getPropertyByteArray(
+ const String8& name, Vector<uint8_t>& value) const {
+ ssize_t index = mByteArrayProperties.indexOfKey(name);
+ if (index < 0) {
+ ALOGE("App requested unknown property: %s", name.string());
+ return android::BAD_VALUE;
+ }
+ value = mByteArrayProperties.valueAt(index);
+ return android::OK;
+}
+
+status_t DrmPlugin::setPropertyByteArray(
+ const String8& name, const Vector<uint8_t>& value) {
+ if (0 == name.compare(kDeviceIdKey)) {
+ ALOGD("Cannot set immutable property: %s", name.string());
+ return android::BAD_VALUE;
+ }
+
+ ssize_t status = mByteArrayProperties.replaceValueFor(name, value);
+ if (status >= 0) {
+ return android::OK;
+ }
+ ALOGE("Failed to set property byte array, key=%s", name.string());
+ return android::BAD_VALUE;
+}
+
status_t DrmPlugin::getPropertyString(
const String8& name, String8& value) const {
- if (name == "vendor") {
- value = "Google";
- } else if (name == "version") {
- value = "1.0";
- } else if (name == "description") {
- value = "ClearKey CDM";
- } else if (name == "algorithms") {
- value = "";
- } else if (name == "listenerTestSupport") {
- value = "true";
- } else {
- ALOGE("App requested unknown string property %s", name.string());
- return android::ERROR_DRM_CANNOT_HANDLE;
+ ssize_t index = mStringProperties.indexOfKey(name);
+ if (index < 0) {
+ ALOGE("App requested unknown property: %s", name.string());
+ return android::BAD_VALUE;
+ }
+ value = mStringProperties.valueAt(index);
+ return android::OK;
+}
+
+status_t DrmPlugin::setPropertyString(
+ const String8& name, const String8& value) {
+ String8 immutableKeys;
+ immutableKeys.appendFormat("%s,%s,%s,%s",
+ kAlgorithmsKey.string(), kPluginDescriptionKey.string(),
+ kVendorKey.string(), kVersionKey.string());
+ if (immutableKeys.contains(name.string())) {
+ ALOGD("Cannot set immutable property: %s", name.string());
+ return android::BAD_VALUE;
+ }
+
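+ // KeyedVector::add() replaces the value when the key already exists, so repeated sets simply overwrite.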
+ if (mStringProperties.add(name, value) < 0) {
+ ALOGE("Failed to set property string, key=%s", name.string());
+ return android::BAD_VALUE;
}
return android::OK;
}
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
index f37a706..62bc86f 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
@@ -137,25 +137,13 @@
const String8& name, String8& value) const;
virtual status_t getPropertyByteArray(
- const String8& name, Vector<uint8_t>& value) const {
- UNUSED(name);
- UNUSED(value);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
+ const String8& name, Vector<uint8_t>& value) const;
virtual status_t setPropertyString(
- const String8& name, const String8& value) {
- UNUSED(name);
- UNUSED(value);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
+ const String8& name, const String8& value);
virtual status_t setPropertyByteArray(
- const String8& name, const Vector<uint8_t>& value) {
- UNUSED(name);
- UNUSED(value);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
+ const String8& name, const Vector<uint8_t>& value);
virtual status_t setCipherAlgorithm(
const Vector<uint8_t>& sessionId, const String8& algorithm) {
@@ -242,9 +230,13 @@
}
private:
+ void initProperties();
void setPlayPolicy();
- android::KeyedVector<android::String8, android::String8> mPlayPolicy;
+ android::KeyedVector<String8, String8> mPlayPolicy;
+ android::KeyedVector<String8, String8> mStringProperties;
+ android::KeyedVector<String8, Vector<uint8_t>> mByteArrayProperties;
+
SessionLibrary* mSessionLibrary;
DISALLOW_EVIL_CONSTRUCTORS(DrmPlugin);
diff --git a/media/libaaudio/examples/input_monitor/Android.bp b/media/libaaudio/examples/input_monitor/Android.bp
index 2c3418d..d8c5843 100644
--- a/media/libaaudio/examples/input_monitor/Android.bp
+++ b/media/libaaudio/examples/input_monitor/Android.bp
@@ -2,6 +2,7 @@
name: "input_monitor",
gtest: false,
srcs: ["src/input_monitor.cpp"],
+ cflags: ["-Wall", "-Werror"],
shared_libs: ["libaaudio"],
header_libs: ["libaaudio_example_utils"],
}
@@ -10,6 +11,7 @@
name: "input_monitor_callback",
gtest: false,
srcs: ["src/input_monitor_callback.cpp"],
+ cflags: ["-Wall", "-Werror"],
shared_libs: ["libaaudio"],
header_libs: ["libaaudio_example_utils"],
}
diff --git a/media/libaaudio/examples/loopback/Android.bp b/media/libaaudio/examples/loopback/Android.bp
index 2b624a8..fa8fdc9 100644
--- a/media/libaaudio/examples/loopback/Android.bp
+++ b/media/libaaudio/examples/loopback/Android.bp
@@ -2,6 +2,7 @@
name: "aaudio_loopback",
gtest: false,
srcs: ["src/loopback.cpp"],
+ cflags: ["-Wall", "-Werror"],
shared_libs: ["libaaudio"],
header_libs: ["libaaudio_example_utils"],
}
diff --git a/media/libaaudio/examples/write_sine/Android.bp b/media/libaaudio/examples/write_sine/Android.bp
index f162e85..aa25e67 100644
--- a/media/libaaudio/examples/write_sine/Android.bp
+++ b/media/libaaudio/examples/write_sine/Android.bp
@@ -1,6 +1,7 @@
cc_test {
name: "write_sine",
srcs: ["src/write_sine.cpp"],
+ cflags: ["-Wall", "-Werror"],
shared_libs: ["libaaudio"],
header_libs: ["libaaudio_example_utils"],
}
@@ -8,6 +9,7 @@
cc_test {
name: "write_sine_callback",
srcs: ["src/write_sine_callback.cpp"],
+ cflags: ["-Wall", "-Werror"],
shared_libs: ["libaaudio"],
header_libs: ["libaaudio_example_utils"],
}
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 2450920..fc5830a 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -25,6 +25,7 @@
#include "aaudio/AAudio.h"
#include <aaudio/AAudioTesting.h>
+#include <math.h>
#include "utility/AAudioUtilities.h"
@@ -50,44 +51,10 @@
return size;
}
-
// TODO expose and call clamp16_from_float function in primitives.h
static inline int16_t clamp16_from_float(float f) {
- /* Offset is used to expand the valid range of [-1.0, 1.0) into the 16 lsbs of the
- * floating point significand. The normal shift is 3<<22, but the -15 offset
- * is used to multiply by 32768.
- */
- static const float offset = (float)(3 << (22 - 15));
- /* zero = (0x10f << 22) = 0x43c00000 (not directly used) */
- static const int32_t limneg = (0x10f << 22) /*zero*/ - 32768; /* 0x43bf8000 */
- static const int32_t limpos = (0x10f << 22) /*zero*/ + 32767; /* 0x43c07fff */
-
- union {
- float f;
- int32_t i;
- } u;
-
- u.f = f + offset; /* recenter valid range */
- /* Now the valid range is represented as integers between [limneg, limpos].
- * Clamp using the fact that float representation (as an integer) is an ordered set.
- */
- if (u.i < limneg)
- u.i = -32768;
- else if (u.i > limpos)
- u.i = 32767;
- return u.i; /* Return lower 16 bits, the part of interest in the significand. */
-}
-
-// Same but without clipping.
-// Convert -1.0f to +1.0f to -32768 to +32767
-static inline int16_t floatToInt16(float f) {
- static const float offset = (float)(3 << (22 - 15));
- union {
- float f;
- int32_t i;
- } u;
- u.f = f + offset; /* recenter valid range */
- return u.i; /* Return lower 16 bits, the part of interest in the significand. */
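+ // Scale [-1.0, 1.0) by 32768, clamp to [-32768, 32767], then round to the nearest integer.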
+ static const float scale = 1 << 15;
+ return (int16_t) roundf(fmaxf(fminf(f * scale, scale - 1.f), -scale));
}
static float clipAndClampFloatToPcm16(float sample, float scaler) {
@@ -188,13 +155,14 @@
int32_t samplesPerFrame,
float amplitude1,
float amplitude2) {
- float scaler = amplitude1 / SHORT_SCALE;
- float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
+ // Because we are converting from int16 to int16, we do not have to scale by 1/32768.
+ float scaler = amplitude1;
+ float delta = (amplitude2 - amplitude1) / numFrames;
for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
// No need to clip because int16_t range is inherently limited.
float sample = *source++ * scaler;
- *destination++ = floatToInt16(sample);
+ *destination++ = (int16_t) roundf(sample);
}
scaler += delta;
}
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 884a2b3..9f80695 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -111,3 +111,16 @@
"libutils",
],
}
+
+cc_test {
+ name: "test_aaudio_monkey",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_aaudio_monkey.cpp"],
+ header_libs: ["libaaudio_example_utils"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
diff --git a/media/libaaudio/tests/test_aaudio_monkey.cpp b/media/libaaudio/tests/test_aaudio_monkey.cpp
new file mode 100644
index 0000000..be54835
--- /dev/null
+++ b/media/libaaudio/tests/test_aaudio_monkey.cpp
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Try to trigger bugs by playing randomly on multiple streams.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <vector>
+
+#include <aaudio/AAudio.h>
+#include "AAudioArgsParser.h"
+#include "AAudioExampleUtils.h"
+#include "AAudioSimplePlayer.h"
+#include "SineGenerator.h"
+
+#define DEFAULT_TIMEOUT_NANOS (1 * NANOS_PER_SECOND)
+
+#define NUM_LOOPS 1000
+#define MAX_MICROS_DELAY (2 * 1000 * 1000)
+
+// TODO Consider adding an input stream.
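+// Cumulative probability thresholds; a single uniform random value in [0, 1) selects one action below.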
+#define PROB_START (0.20)
+#define PROB_PAUSE (PROB_START + 0.10)
+#define PROB_FLUSH (PROB_PAUSE + 0.10)
+#define PROB_STOP (PROB_FLUSH + 0.10)
+#define PROB_CLOSE (PROB_STOP + 0.10)
+static_assert(PROB_CLOSE < 0.9, "Probability sum too high.");
+
+aaudio_data_callback_result_t AAudioMonkeyDataCallback(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames);
+
+void AAudioMonkeyErrorCallbackProc(
+ AAudioStream *stream __unused,
+ void *userData __unused,
+ aaudio_result_t error) {
+ printf("Error Callback, error: %d\n",(int)error);
+}
+
+// This function is not thread safe. Only use this from a single thread.
+double nextRandomDouble() {
+ return drand48();
+}
+
+class AAudioMonkey : public AAudioSimplePlayer {
+public:
+
+ AAudioMonkey(int index, AAudioArgsParser *argParser)
+ : mArgParser(argParser)
+ , mIndex(index) {}
+
+ aaudio_result_t open() {
+ printf("Monkey # %d ---------------------------------------------- OPEN\n", mIndex);
+ double offset = mIndex * 50;
+ mSine1.setup(440.0, 48000);
+ mSine1.setSweep(300.0 + offset, 600.0 + offset, 5.0);
+ mSine2.setup(660.0, 48000);
+ mSine2.setSweep(350.0 + offset, 900.0 + offset, 7.0);
+
+ aaudio_result_t result = AAudioSimplePlayer::open(*mArgParser,
+ AAudioMonkeyDataCallback,
+ AAudioMonkeyErrorCallbackProc,
+ this);
+ if (result != AAUDIO_OK) {
+ printf("ERROR - player.open() returned %d\n", result);
+ }
+
+ mArgParser->compareWithStream(getStream());
+ return result;
+ }
+
+ bool isOpen() {
+ return (getStream() != nullptr);
+ }
+
+ /**
+ *
+ * @return true if stream passes tests
+ */
+ bool validate() {
+ if (!isOpen()) return true; // closed is OK
+
+ // update and query stream state
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+ aaudio_result_t result = AAudioStream_waitForStateChange(getStream(),
+ AAUDIO_STREAM_STATE_UNKNOWN, &state, 0);
+ if (result != AAUDIO_OK) {
+ printf("ERROR - AAudioStream_waitForStateChange returned %d\n", result);
+ return false;
+ }
+
+ int64_t framesRead = AAudioStream_getFramesRead(getStream());
+ int64_t framesWritten = AAudioStream_getFramesWritten(getStream());
+ int32_t xRuns = AAudioStream_getXRunCount(getStream());
+ // Print status
+ printf("%30s, framesWritten = %8lld, framesRead = %8lld, xRuns = %d\n",
+ AAudio_convertStreamStateToText(state),
+ (unsigned long long) framesWritten,
+ (unsigned long long) framesRead,
+ xRuns);
+
+ if (framesWritten < framesRead) {
+ printf("WARNING - UNDERFLOW - diff = %d !!!!!!!!!!!!\n",
+ (int) (framesWritten - framesRead));
+ }
+ return true;
+ }
+
+ aaudio_result_t invoke() {
+ aaudio_result_t result = AAUDIO_OK;
+ if (!isOpen()) {
+ result = open();
+ if (result != AAUDIO_OK) return result;
+ }
+
+ if (!validate()) {
+ return -1;
+ }
+
+ double dice = nextRandomDouble();
+ // Select an action based on a weighted probability.
+ if (dice < PROB_START) {
+ printf("start\n");
+ result = AAudioStream_requestStart(getStream());
+ } else if (dice < PROB_PAUSE) {
+ printf("pause\n");
+ result = AAudioStream_requestPause(getStream());
+ } else if (dice < PROB_FLUSH) {
+ printf("flush\n");
+ result = AAudioStream_requestFlush(getStream());
+ } else if (dice < PROB_STOP) {
+ printf("stop\n");
+ result = AAudioStream_requestStop(getStream());
+ } else if (dice < PROB_CLOSE) {
+ printf("close\n");
+ result = close();
+ } else {
+ printf("do nothing\n");
+ }
+
+ if (result == AAUDIO_ERROR_INVALID_STATE) {
+ printf(" got AAUDIO_ERROR_INVALID_STATE - expected from a monkey\n");
+ result = AAUDIO_OK;
+ }
+ if (result == AAUDIO_OK && isOpen()) {
+ if (!validate()) {
+ result = -1;
+ }
+ }
+ return result;
+ }
+
+ aaudio_data_callback_result_t renderAudio(
+ AAudioStream *stream,
+ void *audioData,
+ int32_t numFrames) {
+
+ int32_t samplesPerFrame = AAudioStream_getChannelCount(stream);
+ // This code only plays on the first one or two channels.
+ // TODO Support arbitrary number of channels.
+ switch (AAudioStream_getFormat(stream)) {
+ case AAUDIO_FORMAT_PCM_I16: {
+ int16_t *audioBuffer = (int16_t *) audioData;
+ // Render sine waves as shorts to first channel.
+ mSine1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+ // Render sine waves to second channel if there is one.
+ if (samplesPerFrame > 1) {
+ mSine2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ }
+ }
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT: {
+ float *audioBuffer = (float *) audioData;
+ // Render sine waves as floats to first channel.
+ mSine1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+ // Render sine waves to second channel if there is one.
+ if (samplesPerFrame > 1) {
+ mSine2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ }
+ }
+ break;
+ default:
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+ }
+
+private:
+ const AAudioArgsParser *mArgParser;
+ const int mIndex;
+ SineGenerator mSine1;
+ SineGenerator mSine2;
+};
+
+// Callback function that fills the audio output buffer.
+aaudio_data_callback_result_t AAudioMonkeyDataCallback(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames
+) {
+ // should not happen but just in case...
+ if (userData == nullptr) {
+ printf("ERROR - AAudioMonkeyDataCallback needs userData\n");
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+ AAudioMonkey *monkey = (AAudioMonkey *) userData;
+ return monkey->renderAudio(stream, audioData, numFrames);
+}
+
+
+static void usage() {
+ AAudioArgsParser::usage();
+ printf(" -i{seed} Initial random seed\n");
+ printf(" -t{count} number of monkeys in the Troop\n");
+}
+
+int main(int argc, const char **argv) {
+ AAudioArgsParser argParser;
+ std::vector<AAudioMonkey> monkeys;
+ aaudio_result_t result;
+ int numMonkeys = 1;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ printf("%s - Monkeys\n", argv[0]);
+
+ long int seed = (long int)getNanoseconds(); // different every time by default
+
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (argParser.parseArg(arg)) {
+ // Handle options that are not handled by the ArgParser
+ if (arg[0] == '-') {
+ char option = arg[1];
+ switch (option) {
+ case 'i':
+ seed = atol(&arg[2]);
+ break;
+ case 't':
+ numMonkeys = atoi(&arg[2]);
+ break;
+ default:
+ usage();
+ exit(EXIT_FAILURE);
+ break;
+ }
+ } else {
+ usage();
+ exit(EXIT_FAILURE);
+ break;
+ }
+ }
+ }
+
+ srand48(seed);
+ printf("seed = %ld, nextRandomDouble() = %f\n", seed, nextRandomDouble());
+
+ for (int m = 0; m < numMonkeys; m++) {
+ monkeys.emplace_back(m, &argParser);
+ }
+
+ for (int i = 0; i < NUM_LOOPS; i++) {
+ // pick a random monkey and invoke it
+ double dice = nextRandomDouble();
+ int monkeyIndex = floor(dice * numMonkeys);
+ printf("----------- Monkey #%d\n", monkeyIndex);
+ result = monkeys[monkeyIndex].invoke();
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+
+ // sleep some random time
+ dice = nextRandomDouble();
+ dice = dice * dice * dice; // skew towards smaller delays
+ int micros = (int) (dice * MAX_MICROS_DELAY);
+ usleep(micros);
+
+ // TODO consider making this multi-threaded, one thread per monkey, to catch more bugs
+ }
+
+ printf("PASS\n");
+ return EXIT_SUCCESS;
+
+error:
+ printf("FAIL - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+ usleep(1000 * 1000); // give me time to stop the logcat
+ return EXIT_FAILURE;
+}
+
diff --git a/media/libaaudio/tests/test_linear_ramp.cpp b/media/libaaudio/tests/test_linear_ramp.cpp
index 5c53982..93226ba 100644
--- a/media/libaaudio/tests/test_linear_ramp.cpp
+++ b/media/libaaudio/tests/test_linear_ramp.cpp
@@ -15,13 +15,13 @@
*/
#include <iostream>
+#include <math.h>
#include <gtest/gtest.h>
#include "utility/AAudioUtilities.h"
#include "utility/LinearRamp.h"
-
TEST(test_linear_ramp, linear_ramp_segments) {
LinearRamp ramp;
const float source[4] = {1.0f, 1.0f, 1.0f, 1.0f };
@@ -32,40 +32,40 @@
ramp.setLengthInFrames(8);
ramp.setTarget(8.0f);
- ASSERT_EQ(8, ramp.getLengthInFrames());
+ EXPECT_EQ(8, ramp.getLengthInFrames());
bool ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(1, ramping);
- ASSERT_EQ(0.0f, levelFrom);
- ASSERT_EQ(4.0f, levelTo);
+ EXPECT_EQ(1, ramping);
+ EXPECT_EQ(0.0f, levelFrom);
+ EXPECT_EQ(4.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(0.0f, destination[0]);
- ASSERT_EQ(1.0f, destination[1]);
- ASSERT_EQ(2.0f, destination[2]);
- ASSERT_EQ(3.0f, destination[3]);
+ EXPECT_EQ(0.0f, destination[0]);
+ EXPECT_EQ(1.0f, destination[1]);
+ EXPECT_EQ(2.0f, destination[2]);
+ EXPECT_EQ(3.0f, destination[3]);
ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(1, ramping);
- ASSERT_EQ(4.0f, levelFrom);
- ASSERT_EQ(8.0f, levelTo);
+ EXPECT_EQ(1, ramping);
+ EXPECT_EQ(4.0f, levelFrom);
+ EXPECT_EQ(8.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(4.0f, destination[0]);
- ASSERT_EQ(5.0f, destination[1]);
- ASSERT_EQ(6.0f, destination[2]);
- ASSERT_EQ(7.0f, destination[3]);
+ EXPECT_EQ(4.0f, destination[0]);
+ EXPECT_EQ(5.0f, destination[1]);
+ EXPECT_EQ(6.0f, destination[2]);
+ EXPECT_EQ(7.0f, destination[3]);
ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(0, ramping);
- ASSERT_EQ(8.0f, levelFrom);
- ASSERT_EQ(8.0f, levelTo);
+ EXPECT_EQ(0, ramping);
+ EXPECT_EQ(8.0f, levelFrom);
+ EXPECT_EQ(8.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(8.0f, destination[0]);
- ASSERT_EQ(8.0f, destination[1]);
- ASSERT_EQ(8.0f, destination[2]);
- ASSERT_EQ(8.0f, destination[3]);
+ EXPECT_EQ(8.0f, destination[0]);
+ EXPECT_EQ(8.0f, destination[1]);
+ EXPECT_EQ(8.0f, destination[2]);
+ EXPECT_EQ(8.0f, destination[3]);
};
@@ -80,29 +80,101 @@
ramp.setLengthInFrames(4);
ramp.setTarget(8.0f);
ramp.forceCurrent(4.0f);
- ASSERT_EQ(4.0f, ramp.getCurrent());
+ EXPECT_EQ(4.0f, ramp.getCurrent());
bool ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(1, ramping);
- ASSERT_EQ(4.0f, levelFrom);
- ASSERT_EQ(8.0f, levelTo);
+ EXPECT_EQ(1, ramping);
+ EXPECT_EQ(4.0f, levelFrom);
+ EXPECT_EQ(8.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(4.0f, destination[0]);
- ASSERT_EQ(5.0f, destination[1]);
- ASSERT_EQ(6.0f, destination[2]);
- ASSERT_EQ(7.0f, destination[3]);
+ EXPECT_EQ(4.0f, destination[0]);
+ EXPECT_EQ(5.0f, destination[1]);
+ EXPECT_EQ(6.0f, destination[2]);
+ EXPECT_EQ(7.0f, destination[3]);
ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(0, ramping);
- ASSERT_EQ(8.0f, levelFrom);
- ASSERT_EQ(8.0f, levelTo);
+ EXPECT_EQ(0, ramping);
+ EXPECT_EQ(8.0f, levelFrom);
+ EXPECT_EQ(8.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(8.0f, destination[0]);
- ASSERT_EQ(8.0f, destination[1]);
- ASSERT_EQ(8.0f, destination[2]);
- ASSERT_EQ(8.0f, destination[3]);
+ EXPECT_EQ(8.0f, destination[0]);
+ EXPECT_EQ(8.0f, destination[1]);
+ EXPECT_EQ(8.0f, destination[2]);
+ EXPECT_EQ(8.0f, destination[3]);
};
+constexpr int16_t kMaxI16 = INT16_MAX;
+constexpr int16_t kMinI16 = INT16_MIN;
+constexpr int16_t kHalfI16 = 16384;
+constexpr int16_t kTenthI16 = 3277;
+
+//void AAudioConvert_floatToPcm16(const float *source,
+// int16_t *destination,
+// int32_t numSamples,
+// float amplitude);
+TEST(test_linear_ramp, float_to_i16) {
+ const float source[] = {12345.6f, 1.0f, 0.5f, 0.1f, 0.0f, -0.1f, -0.5f, -1.0f, -12345.6f};
+ constexpr size_t count = sizeof(source) / sizeof(source[0]);
+ int16_t destination[count];
+ const int16_t expected[count] = {kMaxI16, kMaxI16, kHalfI16, kTenthI16, 0,
+ -kTenthI16, -kHalfI16, kMinI16, kMinI16};
+
+ AAudioConvert_floatToPcm16(source, destination, count, 1.0f);
+ for (size_t i = 0; i < count; i++) {
+ EXPECT_EQ(expected[i], destination[i]);
+ }
+
+}
+
+//void AAudioConvert_pcm16ToFloat(const int16_t *source,
+// float *destination,
+// int32_t numSamples,
+// float amplitude);
+TEST(test_linear_ramp, i16_to_float) {
+ const int16_t source[] = {kMaxI16, kHalfI16, kTenthI16, 0,
+ -kTenthI16, -kHalfI16, kMinI16};
+ constexpr size_t count = sizeof(source) / sizeof(source[0]);
+ float destination[count];
+ const float expected[count] = {(32767.0f / 32768.0f), 0.5f, 0.1f, 0.0f, -0.1f, -0.5f, -1.0f};
+
+ AAudioConvert_pcm16ToFloat(source, destination, count, 1.0f);
+ for (size_t i = 0; i < count; i++) {
+ EXPECT_NEAR(expected[i], destination[i], 0.0001f);
+ }
+
+}
+
+//void AAudio_linearRamp(const int16_t *source,
+// int16_t *destination,
+// int32_t numFrames,
+// int32_t samplesPerFrame,
+// float amplitude1,
+// float amplitude2);
+TEST(test_linear_ramp, ramp_i16_to_i16) {
+ const int16_t source[] = {1, 1, 1, 1, 1, 1, 1, 1};
+ constexpr size_t count = sizeof(source) / sizeof(source[0]);
+ int16_t destination[count];
+ // Ramp will sweep from -1 to almost +1
+ const int16_t expected[count] = {
+ -1, // from -1.00
+ -1, // from -0.75
+ -1, // from -0.50, round away from zero
+ 0, // from -0.25, round up to zero
+ 0, // from 0.00
+ 0, // from 0.25, round down to zero
+ 1, // from 0.50, round away from zero
+ 1 // from 0.75
+ };
+
+ // sweep across zero to test symmetry
+ constexpr float amplitude1 = -1.0;
+ constexpr float amplitude2 = 1.0;
+ AAudio_linearRamp(source, destination, count, 1, amplitude1, amplitude2);
+ for (size_t i = 0; i < count; i++) {
+ EXPECT_EQ(expected[i], destination[i]);
+ }
+
+}
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 98e8d95..bedde43 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -20,7 +20,7 @@
// The headers for these interfaces will be available to any modules that
// include libaudioclient, at the path "aidl/package/path/BnFoo.h"
"aidl/android/media/IAudioRecord.aidl",
- "aidl/android/media/IPlayer.aidl",
+ ":libaudioclient_aidl",
"AudioEffect.cpp",
"AudioPolicy.cpp",
@@ -70,3 +70,11 @@
],
},
}
+
+// AIDL interface between libaudioclient and framework.jar
+filegroup {
+ name: "libaudioclient_aidl",
+ srcs: [
+ "aidl/android/media/IPlayer.aidl",
+ ],
+}
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 30f97ac..c8fa618 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -197,7 +197,7 @@
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0)
{
- mStatus = set(streamType, sampleRate, format, channelMask,
+ (void)set(streamType, sampleRate, format, channelMask,
frameCount, flags, cbf, user, notificationFrames,
0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
@@ -228,7 +228,7 @@
mPausedPosition(0),
mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
- mStatus = set(streamType, sampleRate, format, channelMask,
+ (void)set(streamType, sampleRate, format, channelMask,
0 /*frameCount*/, flags, cbf, user, notificationFrames,
sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
@@ -284,6 +284,11 @@
float maxRequiredSpeed,
audio_port_handle_t selectedDeviceId)
{
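+ // Locals used across the 'goto exit' paths are declared here so the jumps do not cross initializations.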
+ status_t status;
+ uint32_t channelCount;
+ pid_t callingPid;
+ pid_t myPid;
+
ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
"flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
@@ -306,25 +311,29 @@
case TRANSFER_CALLBACK:
if (cbf == NULL || sharedBuffer != 0) {
ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
break;
case TRANSFER_OBTAIN:
case TRANSFER_SYNC:
if (sharedBuffer != 0) {
ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
break;
case TRANSFER_SHARED:
if (sharedBuffer == 0) {
ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
break;
default:
ALOGE("Invalid transfer type %d", transferType);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mSharedBuffer = sharedBuffer;
mTransfer = transferType;
@@ -338,7 +347,8 @@
// invariant that mAudioTrack != 0 is true only after set() returns successfully
if (mAudioTrack != 0) {
ALOGE("Track already in use");
- return INVALID_OPERATION;
+ status = INVALID_OPERATION;
+ goto exit;
}
// handle default values first.
@@ -348,7 +358,8 @@
if (pAttributes == NULL) {
if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
ALOGE("Invalid stream type %d", streamType);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mStreamType = streamType;
@@ -380,16 +391,18 @@
// validate parameters
if (!audio_is_valid_format(format)) {
ALOGE("Invalid format %#x", format);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mFormat = format;
if (!audio_is_output_channel(channelMask)) {
ALOGE("Invalid channel mask %#x", channelMask);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mChannelMask = channelMask;
- uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
+ channelCount = audio_channel_count_from_out_mask(channelMask);
mChannelCount = channelCount;
// force direct flag if format is not linear PCM
@@ -424,7 +437,8 @@
// sampling rate must be specified for direct outputs
if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mSampleRate = sampleRate;
mOriginalSampleRate = sampleRate;
@@ -455,12 +469,14 @@
if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
ALOGE("notificationFrames=%d not permitted for non-fast track",
notificationFrames);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
if (frameCount > 0) {
ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
notificationFrames, frameCount);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mNotificationFramesReq = 0;
const uint32_t minNotificationsPerBuffer = 1;
@@ -472,15 +488,15 @@
notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
}
mNotificationFramesAct = 0;
- int callingpid = IPCThreadState::self()->getCallingPid();
- int mypid = getpid();
- if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
+ callingPid = IPCThreadState::self()->getCallingPid();
+ myPid = getpid();
+ if (uid == AUDIO_UID_INVALID || (callingPid != myPid)) {
mClientUid = IPCThreadState::self()->getCallingUid();
} else {
mClientUid = uid;
}
- if (pid == -1 || (callingpid != mypid)) {
- mClientPid = callingpid;
+ if (pid == -1 || (callingPid != myPid)) {
+ mClientPid = callingPid;
} else {
mClientPid = pid;
}
@@ -495,7 +511,7 @@
}
// create the IAudioTrack
- status_t status = createTrack_l();
+ status = createTrack_l();
if (status != NO_ERROR) {
if (mAudioTrackThread != 0) {
@@ -503,10 +519,9 @@
mAudioTrackThread->requestExitAndWait();
mAudioTrackThread.clear();
}
- return status;
+ goto exit;
}
- mStatus = NO_ERROR;
mUserData = user;
mLoopCount = 0;
mLoopStart = 0;
@@ -534,7 +549,10 @@
mFramesWrittenServerOffset = 0;
mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
mVolumeHandler = new media::VolumeHandler();
- return NO_ERROR;
+
+exit:
+ mStatus = status;
+ return status;
}
// -------------------------------------------------------------------------
@@ -1278,15 +1296,16 @@
status_t AudioTrack::createTrack_l()
{
+ status_t status;
+ bool callbackAdded = false;
+
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
if (audioFlinger == 0) {
ALOGE("Could not get audioflinger");
- return NO_INIT;
+ status = NO_INIT;
+ goto exit;
}
- status_t status;
- bool callbackAdded = false;
-
{
// mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
// After fast request is denied, we will request again if IAudioTrack is re-created.
@@ -1355,7 +1374,10 @@
if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
ALOGE("AudioFlinger could not create track, status: %d output %d", status, output.outputId);
- goto error;
+ if (status == NO_ERROR) {
+ status = NO_INIT;
+ }
+ goto exit;
}
ALOG_ASSERT(track != 0);
@@ -1383,13 +1405,13 @@
if (iMem == 0) {
ALOGE("Could not get control block");
status = NO_INIT;
- goto error;
+ goto exit;
}
void *iMemPointer = iMem->pointer();
if (iMemPointer == NULL) {
ALOGE("Could not get control block pointer");
status = NO_INIT;
- goto error;
+ goto exit;
}
// invariant that mAudioTrack != 0 is true only after set() returns successfully
if (mAudioTrack != 0) {
@@ -1443,7 +1465,7 @@
if (buffers == NULL) {
ALOGE("Could not get buffer pointer");
status = NO_INIT;
- goto error;
+ goto exit;
}
}
@@ -1486,17 +1508,15 @@
mDeathNotifier = new DeathNotifier(this);
IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
- return NO_ERROR;
}
-error:
- if (callbackAdded) {
+exit:
+ if (status != NO_ERROR && callbackAdded) {
// note: mOutput is always valid if callbackAdded is true
AudioSystem::removeAudioDeviceCallback(this, mOutput);
}
- if (status == NO_ERROR) {
- status = NO_INIT;
- }
+
+ mStatus = status;
// sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
return status;
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 5cf2bdb..2976d08 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -22,6 +22,7 @@
#include <stdint.h>
#include <sys/types.h>
+#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
#include "IAudioFlinger.h"
@@ -899,6 +900,35 @@
status_t BnAudioFlinger::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
+ // make sure transactions reserved to AudioPolicyManager do not come from other processes
+ switch (code) {
+ case SET_STREAM_VOLUME:
+ case SET_STREAM_MUTE:
+ case SET_MODE:
+ case OPEN_OUTPUT:
+ case OPEN_DUPLICATE_OUTPUT:
+ case CLOSE_OUTPUT:
+ case SUSPEND_OUTPUT:
+ case RESTORE_OUTPUT:
+ case OPEN_INPUT:
+ case CLOSE_INPUT:
+ case INVALIDATE_STREAM:
+ case SET_VOICE_VOLUME:
+ case MOVE_EFFECTS:
+ case LOAD_HW_MODULE:
+ case LIST_AUDIO_PORTS:
+ case GET_AUDIO_PORT:
+ case CREATE_AUDIO_PATCH:
+ case RELEASE_AUDIO_PATCH:
+ case LIST_AUDIO_PATCHES:
+ case SET_AUDIO_PORT_CONFIG:
+ ALOGW("%s: transaction %d received from PID %d",
+ __func__, code, IPCThreadState::self()->getCallingPid());
+ return INVALID_OPERATION;
+ default:
+ break;
+ }
+
// Whitelist of relevant events to trigger log merging.
// Log merging should activate during audio activity of any kind. These are considered the
// most relevant events.
@@ -908,12 +938,8 @@
case OPEN_RECORD:
case SET_MASTER_VOLUME:
case SET_MASTER_MUTE:
- case SET_STREAM_VOLUME:
- case SET_STREAM_MUTE:
case SET_MIC_MUTE:
case SET_PARAMETERS:
- case OPEN_INPUT:
- case SET_VOICE_VOLUME:
case CREATE_EFFECT:
case SYSTEM_READY: {
requestLogMerge();
@@ -922,6 +948,7 @@
default:
break;
}
+
switch (code) {
case CREATE_TRACK: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 0397eec..53bc1b7 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -22,6 +22,7 @@
#include <math.h>
#include <sys/types.h>
+#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
#include <media/AudioEffect.h>
@@ -831,10 +832,33 @@
// ----------------------------------------------------------------------
-
status_t BnAudioPolicyService::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
+ // make sure transactions reserved to AudioFlinger do not come from other processes
+ switch (code) {
+ case START_OUTPUT:
+ case STOP_OUTPUT:
+ case RELEASE_OUTPUT:
+ case GET_INPUT_FOR_ATTR:
+ case START_INPUT:
+ case STOP_INPUT:
+ case RELEASE_INPUT:
+ case GET_STRATEGY_FOR_STREAM:
+ case GET_OUTPUT_FOR_EFFECT:
+ case REGISTER_EFFECT:
+ case UNREGISTER_EFFECT:
+ case SET_EFFECT_ENABLED:
+ case GET_OUTPUT_FOR_ATTR:
+ case ACQUIRE_SOUNDTRIGGER_SESSION:
+ case RELEASE_SOUNDTRIGGER_SESSION:
+ ALOGW("%s: transaction %d received from PID %d",
+ __func__, code, IPCThreadState::self()->getCallingPid());
+ return INVALID_OPERATION;
+ default:
+ break;
+ }
+
switch (code) {
case SET_DEVICE_CONNECTION_STATE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
@@ -928,6 +952,7 @@
bool hasAttributes = data.readInt32() != 0;
if (hasAttributes) {
data.read(&attr, sizeof(audio_attributes_t));
+ sanetizeAudioAttributes(&attr);
}
audio_session_t session = (audio_session_t)data.readInt32();
audio_stream_type_t stream = AUDIO_STREAM_DEFAULT;
@@ -993,6 +1018,7 @@
CHECK_INTERFACE(IAudioPolicyService, data, reply);
audio_attributes_t attr;
data.read(&attr, sizeof(audio_attributes_t));
+ sanetizeAudioAttributes(&attr);
audio_io_handle_t input = (audio_io_handle_t)data.readInt32();
audio_session_t session = (audio_session_t)data.readInt32();
pid_t pid = (pid_t)data.readInt32();
@@ -1368,6 +1394,7 @@
data.read(&source, sizeof(struct audio_port_config));
audio_attributes_t attributes;
data.read(&attributes, sizeof(audio_attributes_t));
+ sanetizeAudioAttributes(&attributes);
audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
status_t status = startAudioSource(&source, &attributes, &handle);
reply->writeInt32(status);
@@ -1418,6 +1445,15 @@
}
}
+void BnAudioPolicyService::sanetizeAudioAttributes(audio_attributes_t* attr)
+{
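+ // Force NUL-termination of the tags field so later readers cannot run past the fixed-size buffer.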
+ const size_t tagsMaxSize = AUDIO_ATTRIBUTES_TAGS_MAX_SIZE;
+ if (strnlen(attr->tags, tagsMaxSize) >= tagsMaxSize) {
+ android_errorWriteLog(0x534e4554, "68953950"); // SafetyNet logging
+ }
+ attr->tags[tagsMaxSize - 1] = '\0';
+}
+
// ----------------------------------------------------------------------------
} // namespace android
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index 7c88e57..5558b77 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -178,6 +178,8 @@
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
+private:
+ void sanetizeAudioAttributes(audio_attributes_t* attr);
};
// ----------------------------------------------------------------------------
diff --git a/media/libeffects/config/Android.bp b/media/libeffects/config/Android.bp
index 4398a91..3e88c7c 100644
--- a/media/libeffects/config/Android.bp
+++ b/media/libeffects/config/Android.bp
@@ -5,6 +5,11 @@
srcs: ["src/EffectsConfig.cpp"],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+
shared_libs: [
"liblog",
"libtinyxml2",
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c
index c290aec..7b0f341 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c
@@ -430,7 +430,15 @@
}
- if(bChange){
+ // During operating mode transition, there is a race condition where the mode
+ // is still LVEQNB_ON, but the effect is considered disabled in the upper layers.
+ // modeChange handles this special race condition.
+ const int /* bool */ modeChange = pParams->OperatingMode != OperatingModeSave
+ || (OperatingModeSave == LVEQNB_ON
+ && pInstance->bInOperatingModeTransition
+ && LVC_Mixer_GetTarget(&pInstance->BypassMixer.MixerStream[0]) == 0);
+
+ if (bChange || modeChange) {
/*
* If the sample rate has changed clear the history
@@ -462,8 +470,7 @@
LVEQNB_SetCoefficients(pInstance); /* Instance pointer */
}
- if(pParams->OperatingMode != OperatingModeSave)
- {
+ if (modeChange) {
if(pParams->OperatingMode == LVEQNB_ON)
{
#ifdef BUILD_FLOAT
@@ -479,6 +486,8 @@
else
{
/* Stay on the ON operating mode until the transition is done */
+ // This may introduce a state race condition if the effect is enabled again
+ // while in transition. This is fixed in the modeChange logic.
pInstance->Params.OperatingMode = LVEQNB_ON;
#ifdef BUILD_FLOAT
LVC_Mixer_SetTarget(&pInstance->BypassMixer.MixerStream[0], 0.0f);
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 146e9e8..8ebae11 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -3330,14 +3330,19 @@
//ALOGV("\tEffect_process Not Calling process with %d effects enabled, %d called: Effect %d",
//pContext->pBundledContext->NumberEffectsEnabled,
//pContext->pBundledContext->NumberEffectsCalled, pContext->EffectType);
- // 2 is for stereo input
+
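+ // FCC_2 == 2: these effects always process a fixed stereo (two channel) frame layout.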
if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
- for (size_t i=0; i < outBuffer->frameCount*2; i++){
- outBuffer->s16[i] =
- clamp16((LVM_INT32)outBuffer->s16[i] + (LVM_INT32)inBuffer->s16[i]);
+ for (size_t i = 0; i < outBuffer->frameCount * FCC_2; ++i){
+#ifdef NATIVE_FLOAT_BUFFER
+ outBuffer->f32[i] += inBuffer->f32[i];
+#else
+ outBuffer->s16[i] = clamp16((LVM_INT32)outBuffer->s16[i] + inBuffer->s16[i]);
+#endif
}
} else if (outBuffer->raw != inBuffer->raw) {
- memcpy(outBuffer->raw, inBuffer->raw, outBuffer->frameCount*sizeof(LVM_INT16)*2);
+ memcpy(outBuffer->raw,
+ inBuffer->raw,
+ outBuffer->frameCount * sizeof(effect_buffer_t) * FCC_2);
}
}
diff --git a/media/libeffects/visualizer/EffectVisualizer.cpp b/media/libeffects/visualizer/EffectVisualizer.cpp
index 0e82339..c33f9f5 100644
--- a/media/libeffects/visualizer/EffectVisualizer.cpp
+++ b/media/libeffects/visualizer/EffectVisualizer.cpp
@@ -594,7 +594,7 @@
deltaSmpl = CAPTURE_BUF_SIZE;
}
- int32_t capturePoint = pContext->mCaptureIdx - deltaSmpl;
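+ // Cast mCaptureIdx (uint32_t) to signed so the subtraction cannot wrap; a negative capturePoint is handled below.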
+ int32_t capturePoint = (int32_t)pContext->mCaptureIdx - deltaSmpl;
// a negative capturePoint means we wrap the buffer.
if (capturePoint < 0) {
uint32_t size = -capturePoint;
diff --git a/media/libmedia/IMediaHTTPService.cpp b/media/libmedia/IMediaHTTPService.cpp
index 062a07a..74d8ee8 100644
--- a/media/libmedia/IMediaHTTPService.cpp
+++ b/media/libmedia/IMediaHTTPService.cpp
@@ -34,7 +34,7 @@
: BpInterface<IMediaHTTPService>(impl) {
}
- virtual sp<IMediaHTTPConnection> makeHTTPConnection() {
+ virtual sp<MediaHTTPConnection> makeHTTPConnection() {
Parcel data, reply;
data.writeInterfaceToken(
IMediaHTTPService::getInterfaceDescriptor());
diff --git a/media/libmedia/include/media/IMediaHTTPConnection.h b/media/libmedia/include/media/IMediaHTTPConnection.h
index 2a63eb7..0fb6bb1 100644
--- a/media/libmedia/include/media/IMediaHTTPConnection.h
+++ b/media/libmedia/include/media/IMediaHTTPConnection.h
@@ -19,16 +19,15 @@
#define I_MEDIA_HTTP_CONNECTION_H_
#include <binder/IInterface.h>
+#include <media/MediaHTTPConnection.h>
#include <media/stagefright/foundation/ABase.h>
#include <utils/KeyedVector.h>
namespace android {
-struct IMediaHTTPConnection;
-
/** MUST stay in sync with IMediaHTTPConnection.aidl */
-struct IMediaHTTPConnection : public IInterface {
+struct IMediaHTTPConnection : public MediaHTTPConnection, public IInterface {
DECLARE_META_INTERFACE(MediaHTTPConnection);
virtual bool connect(
diff --git a/media/libmedia/include/media/IMediaHTTPService.h b/media/libmedia/include/media/IMediaHTTPService.h
index f66d6c8..e948b78 100644
--- a/media/libmedia/include/media/IMediaHTTPService.h
+++ b/media/libmedia/include/media/IMediaHTTPService.h
@@ -19,18 +19,19 @@
#define I_MEDIA_HTTP_SERVICE_H_
#include <binder/IInterface.h>
+#include <media/MediaHTTPService.h>
#include <media/stagefright/foundation/ABase.h>
namespace android {
-struct IMediaHTTPConnection;
+struct MediaHTTPConnection;
/** MUST stay in sync with IMediaHTTPService.aidl */
-struct IMediaHTTPService : public IInterface {
+struct IMediaHTTPService : public MediaHTTPService, public IInterface {
DECLARE_META_INTERFACE(MediaHTTPService);
- virtual sp<IMediaHTTPConnection> makeHTTPConnection() = 0;
+ virtual sp<MediaHTTPConnection> makeHTTPConnection() = 0;
private:
DISALLOW_EVIL_CONSTRUCTORS(IMediaHTTPService);
diff --git a/media/libmedia/include/media/MediaHTTPConnection.h b/media/libmedia/include/media/MediaHTTPConnection.h
new file mode 100644
index 0000000..82a79e5
--- /dev/null
+++ b/media/libmedia/include/media/MediaHTTPConnection.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_HTTP_CONNECTION_H_
+
+#define MEDIA_HTTP_CONNECTION_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/KeyedVector.h>
+
+namespace android {
+
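+// Binder-agnostic interface for an HTTP connection. IMediaHTTPConnection (the Binder interface)
+// now derives from this, so local and remote implementations can be used interchangeably.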
+struct MediaHTTPConnection : public virtual RefBase {
+ MediaHTTPConnection() {}
+
+ virtual bool connect(
+ const char *uri, const KeyedVector<String8, String8> *headers) = 0;
+
+ virtual void disconnect() = 0;
+ virtual ssize_t readAt(off64_t offset, void *data, size_t size) = 0;
+ virtual off64_t getSize() = 0;
+ virtual status_t getMIMEType(String8 *mimeType) = 0;
+ virtual status_t getUri(String8 *uri) = 0;
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(MediaHTTPConnection);
+};
+
+} // namespace android
+
+#endif // MEDIA_HTTP_CONNECTION_H_
diff --git a/media/libmedia/include/media/MediaHTTPService.h b/media/libmedia/include/media/MediaHTTPService.h
new file mode 100644
index 0000000..6e9f125
--- /dev/null
+++ b/media/libmedia/include/media/MediaHTTPService.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_HTTP_SERVICE_H_
+
+#define MEDIA_HTTP_SERVICE_H_
+
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+struct MediaHTTPConnection;
+
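+// Binder-agnostic counterpart of IMediaHTTPService; it hands out MediaHTTPConnection objects.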
+struct MediaHTTPService : public virtual RefBase {
+ MediaHTTPService() {}
+
+ virtual sp<MediaHTTPConnection> makeHTTPConnection() = 0;
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(MediaHTTPService);
+};
+
+} // namespace android
+
+#endif // MEDIA_HTTP_SERVICE_H_
diff --git a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
index fc9e53c..116b548 100644
--- a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
+++ b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
@@ -49,7 +49,7 @@
int index, int colorFormat, bool metaOnly) = 0;
virtual status_t getFrameAtIndex(
std::vector<VideoFrame*>* frames,
- int frameIndex, int numFrames, int colorFormat, bool metaOnly);
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
virtual MediaAlbumArt* extractAlbumArt() = 0;
virtual const char* extractMetadata(int keyCode) = 0;
};
diff --git a/media/libmediametrics/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
index f7df2b4..6b063e8 100644
--- a/media/libmediametrics/MediaAnalyticsItem.cpp
+++ b/media/libmediametrics/MediaAnalyticsItem.cpp
@@ -214,12 +214,12 @@
return mPkgName;
}
-MediaAnalyticsItem &MediaAnalyticsItem::setPkgVersionCode(int32_t pkgVersionCode) {
+MediaAnalyticsItem &MediaAnalyticsItem::setPkgVersionCode(int64_t pkgVersionCode) {
mPkgVersionCode = pkgVersionCode;
return *this;
}
-int32_t MediaAnalyticsItem::getPkgVersionCode() const {
+int64_t MediaAnalyticsItem::getPkgVersionCode() const {
return mPkgVersionCode;
}
@@ -640,7 +640,7 @@
mPid = data.readInt32();
mUid = data.readInt32();
mPkgName = data.readCString();
- mPkgVersionCode = data.readInt32();
+ mPkgVersionCode = data.readInt64();
mSessionID = data.readInt64();
mFinalized = data.readInt32();
mTimestamp = data.readInt64();
@@ -687,7 +687,7 @@
data->writeInt32(mPid);
data->writeInt32(mUid);
data->writeCString(mPkgName.c_str());
- data->writeInt32(mPkgVersionCode);
+ data->writeInt64(mPkgVersionCode);
data->writeInt64(mSessionID);
data->writeInt32(mFinalized);
data->writeInt64(mTimestamp);
@@ -766,7 +766,7 @@
if (version >= PROTO_V1) {
result.append(mPkgName);
- snprintf(buffer, sizeof(buffer), ":%d:", mPkgVersionCode);
+ snprintf(buffer, sizeof(buffer), ":%" PRId64 ":", mPkgVersionCode);
result.append(buffer);
}
diff --git a/media/libmediametrics/include/MediaAnalyticsItem.h b/media/libmediametrics/include/MediaAnalyticsItem.h
index 5f9b916..ec9b660 100644
--- a/media/libmediametrics/include/MediaAnalyticsItem.h
+++ b/media/libmediametrics/include/MediaAnalyticsItem.h
@@ -173,8 +173,8 @@
MediaAnalyticsItem &setPkgName(AString);
AString getPkgName() const;
- MediaAnalyticsItem &setPkgVersionCode(int32_t);
- int32_t getPkgVersionCode() const;
+ MediaAnalyticsItem &setPkgVersionCode(int64_t);
+ int64_t getPkgVersionCode() const;
// our serialization code for binder calls
int32_t writeToParcel(Parcel *);
@@ -205,7 +205,7 @@
pid_t mPid;
uid_t mUid;
AString mPkgName;
- int32_t mPkgVersionCode;
+ int64_t mPkgVersionCode;
// let's reuse a binder connection
static sp<IMediaAnalyticsService> sAnalyticsService;
diff --git a/media/libnblog/PerformanceAnalysis.cpp b/media/libnblog/PerformanceAnalysis.cpp
index 478c460..f09e93d 100644
--- a/media/libnblog/PerformanceAnalysis.cpp
+++ b/media/libnblog/PerformanceAnalysis.cpp
@@ -230,6 +230,7 @@
}
// rounds value to precision based on log-distance from mean
+__attribute__((no_sanitize("signed-integer-overflow")))
inline double logRound(double x, double mean) {
// Larger values decrease range of high resolution and prevent overflow
// of a histogram on the console.
diff --git a/media/libstagefright/DataSourceFactory.cpp b/media/libstagefright/DataSourceFactory.cpp
index aee858c..54bf0cc 100644
--- a/media/libstagefright/DataSourceFactory.cpp
+++ b/media/libstagefright/DataSourceFactory.cpp
@@ -19,8 +19,8 @@
#include "include/HTTPBase.h"
#include "include/NuCachedSource2.h"
-#include <media/IMediaHTTPConnection.h>
-#include <media/IMediaHTTPService.h>
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
#include <media/stagefright/DataSourceFactory.h>
#include <media/stagefright/DataURISource.h>
#include <media/stagefright/FileSource.h>
@@ -31,7 +31,7 @@
// static
sp<DataSource> DataSourceFactory::CreateFromURI(
- const sp<IMediaHTTPService> &httpService,
+ const sp<MediaHTTPService> &httpService,
const char *uri,
const KeyedVector<String8, String8> *headers,
String8 *contentType,
@@ -50,7 +50,7 @@
}
if (httpSource == NULL) {
- sp<IMediaHTTPConnection> conn = httpService->makeHTTPConnection();
+ sp<MediaHTTPConnection> conn = httpService->makeHTTPConnection();
if (conn == NULL) {
ALOGE("Failed to make http connection from http service!");
return NULL;
@@ -101,12 +101,12 @@
return source->initCheck() != OK ? nullptr : source;
}
-sp<DataSource> DataSourceFactory::CreateMediaHTTP(const sp<IMediaHTTPService> &httpService) {
+sp<DataSource> DataSourceFactory::CreateMediaHTTP(const sp<MediaHTTPService> &httpService) {
if (httpService == NULL) {
return NULL;
}
- sp<IMediaHTTPConnection> conn = httpService->makeHTTPConnection();
+ sp<MediaHTTPConnection> conn = httpService->makeHTTPConnection();
if (conn == NULL) {
return NULL;
} else {
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index fa5f37ec..b529940 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -419,8 +419,11 @@
*done = (++mNumFramesDecoded >= mNumFrames);
+ if (outputFormat == NULL) {
+ return ERROR_MALFORMED;
+ }
+
int32_t width, height;
- CHECK(outputFormat != NULL);
CHECK(outputFormat->findInt32("width", &width));
CHECK(outputFormat->findInt32("height", &height));
@@ -540,8 +543,11 @@
status_t ImageDecoder::onOutputReceived(
const sp<MediaCodecBuffer> &videoFrameBuffer,
const sp<AMessage> &outputFormat, int64_t /*timeUs*/, bool *done) {
+ if (outputFormat == NULL) {
+ return ERROR_MALFORMED;
+ }
+
int32_t width, height;
- CHECK(outputFormat != NULL);
CHECK(outputFormat->findInt32("width", &width));
CHECK(outputFormat->findInt32("height", &height));
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 1fe5f60..8db00f0 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -2951,212 +2951,215 @@
mGotStartKeyFrame = true;
}
////////////////////////////////////////////////////////////////////////////////
- if (mStszTableEntries->count() == 0) {
- mFirstSampleTimeRealUs = systemTime() / 1000;
- mStartTimestampUs = timestampUs;
- mOwner->setStartTimestampUs(mStartTimestampUs);
- previousPausedDurationUs = mStartTimestampUs;
- }
- if (mResumed) {
- int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
- if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
- if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- previousPausedDurationUs += pausedDurationUs - lastDurationUs;
- mResumed = false;
- }
- TimestampDebugHelperEntry timestampDebugEntry;
- timestampUs -= previousPausedDurationUs;
- timestampDebugEntry.pts = timestampUs;
- if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- if (mIsVideo) {
- /*
- * Composition time: timestampUs
- * Decoding time: decodingTimeUs
- * Composition time offset = composition time - decoding time
- */
- int64_t decodingTimeUs;
- CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
- decodingTimeUs -= previousPausedDurationUs;
-
- // ensure non-negative, monotonic decoding time
- if (mLastDecodingTimeUs < 0) {
- decodingTimeUs = std::max((int64_t)0, decodingTimeUs);
- } else {
- // increase decoding time by at least the larger vaule of 1 tick and
- // 0.1 milliseconds. This needs to take into account the possible
- // delta adjustment in DurationTicks in below.
- decodingTimeUs = std::max(mLastDecodingTimeUs +
- std::max(100, divUp(1000000, mTimeScale)), decodingTimeUs);
- }
-
- mLastDecodingTimeUs = decodingTimeUs;
- timestampDebugEntry.dts = decodingTimeUs;
- timestampDebugEntry.frameType = isSync ? "Key frame" : "Non-Key frame";
- // Insert the timestamp into the mTimestampDebugHelper
- if (mTimestampDebugHelper.size() >= kTimestampDebugCount) {
- mTimestampDebugHelper.pop_front();
- }
- mTimestampDebugHelper.push_back(timestampDebugEntry);
-
- cttsOffsetTimeUs =
- timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
- if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- timestampUs = decodingTimeUs;
- ALOGV("decoding time: %" PRId64 " and ctts offset time: %" PRId64,
- timestampUs, cttsOffsetTimeUs);
-
- // Update ctts box table if necessary
- currCttsOffsetTimeTicks =
- (cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
- if (WARN_UNLESS(currCttsOffsetTimeTicks <= 0x0FFFFFFFFLL, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
+ if (!mIsHeic) {
if (mStszTableEntries->count() == 0) {
- // Force the first ctts table entry to have one single entry
- // so that we can do adjustment for the initial track start
- // time offset easily in writeCttsBox().
- lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
- addOneCttsTableEntry(1, currCttsOffsetTimeTicks);
- cttsSampleCount = 0; // No sample in ctts box is pending
- } else {
- if (currCttsOffsetTimeTicks != lastCttsOffsetTimeTicks) {
- addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
- lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
- cttsSampleCount = 1; // One sample in ctts box is pending
+ mFirstSampleTimeRealUs = systemTime() / 1000;
+ mStartTimestampUs = timestampUs;
+ mOwner->setStartTimestampUs(mStartTimestampUs);
+ previousPausedDurationUs = mStartTimestampUs;
+ }
+
+ if (mResumed) {
+ int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
+ if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
+ if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ previousPausedDurationUs += pausedDurationUs - lastDurationUs;
+ mResumed = false;
+ }
+ TimestampDebugHelperEntry timestampDebugEntry;
+ timestampUs -= previousPausedDurationUs;
+ timestampDebugEntry.pts = timestampUs;
+ if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ if (mIsVideo) {
+ /*
+ * Composition time: timestampUs
+ * Decoding time: decodingTimeUs
+ * Composition time offset = composition time - decoding time
+ */
+ int64_t decodingTimeUs;
+ CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
+ decodingTimeUs -= previousPausedDurationUs;
+
+ // ensure non-negative, monotonic decoding time
+ if (mLastDecodingTimeUs < 0) {
+ decodingTimeUs = std::max((int64_t)0, decodingTimeUs);
} else {
- ++cttsSampleCount;
+ // increase decoding time by at least the larger value of 1 tick and
+ // 0.1 milliseconds. This needs to take into account the possible
+ // delta adjustment in DurationTicks below.
+ decodingTimeUs = std::max(mLastDecodingTimeUs +
+ std::max(100, divUp(1000000, mTimeScale)), decodingTimeUs);
}
- }
- // Update ctts time offset range
- if (mStszTableEntries->count() == 0) {
- mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
- mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
- } else {
- if (currCttsOffsetTimeTicks > mMaxCttsOffsetTicks) {
- mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
- } else if (currCttsOffsetTimeTicks < mMinCttsOffsetTicks) {
+ mLastDecodingTimeUs = decodingTimeUs;
+ timestampDebugEntry.dts = decodingTimeUs;
+ timestampDebugEntry.frameType = isSync ? "Key frame" : "Non-Key frame";
+ // Insert the timestamp into the mTimestampDebugHelper
+ if (mTimestampDebugHelper.size() >= kTimestampDebugCount) {
+ mTimestampDebugHelper.pop_front();
+ }
+ mTimestampDebugHelper.push_back(timestampDebugEntry);
+
+ cttsOffsetTimeUs =
+ timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
+ if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ timestampUs = decodingTimeUs;
+ ALOGV("decoding time: %" PRId64 " and ctts offset time: %" PRId64,
+ timestampUs, cttsOffsetTimeUs);
+
+ // Update ctts box table if necessary
+ currCttsOffsetTimeTicks =
+ (cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
+ if (WARN_UNLESS(currCttsOffsetTimeTicks <= 0x0FFFFFFFFLL, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ if (mStszTableEntries->count() == 0) {
+ // Force the first ctts table entry to have one single entry
+ // so that we can do adjustment for the initial track start
+ // time offset easily in writeCttsBox().
+ lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
+ addOneCttsTableEntry(1, currCttsOffsetTimeTicks);
+ cttsSampleCount = 0; // No sample in ctts box is pending
+ } else {
+ if (currCttsOffsetTimeTicks != lastCttsOffsetTimeTicks) {
+ addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
+ lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
+ cttsSampleCount = 1; // One sample in ctts box is pending
+ } else {
+ ++cttsSampleCount;
+ }
+ }
+
+ // Update ctts time offset range
+ if (mStszTableEntries->count() == 0) {
mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
- mMinCttsOffsetTimeUs = cttsOffsetTimeUs;
+ mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
+ } else {
+ if (currCttsOffsetTimeTicks > mMaxCttsOffsetTicks) {
+ mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
+ } else if (currCttsOffsetTimeTicks < mMinCttsOffsetTicks) {
+ mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
+ mMinCttsOffsetTimeUs = cttsOffsetTimeUs;
+ }
}
}
- }
- if (mOwner->isRealTimeRecording()) {
- if (mIsAudio) {
- updateDriftTime(meta_data);
- }
- }
-
- if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- ALOGV("%s media time stamp: %" PRId64 " and previous paused duration %" PRId64,
- trackName, timestampUs, previousPausedDurationUs);
- if (timestampUs > mTrackDurationUs) {
- mTrackDurationUs = timestampUs;
- }
-
- // We need to use the time scale based ticks, rather than the
- // timestamp itself to determine whether we have to use a new
- // stts entry, since we may have rounding errors.
- // The calculation is intended to reduce the accumulated
- // rounding errors.
- currDurationTicks =
- ((timestampUs * mTimeScale + 500000LL) / 1000000LL -
- (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
- if (currDurationTicks < 0ll) {
- ALOGE("do not support out of order frames (timestamp: %lld < last: %lld for %s track",
- (long long)timestampUs, (long long)lastTimestampUs, trackName);
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- // if the duration is different for this sample, see if it is close enough to the previous
- // duration that we can fudge it and use the same value, to avoid filling the stts table
- // with lots of near-identical entries.
- // "close enough" here means that the current duration needs to be adjusted by less
- // than 0.1 milliseconds
- if (lastDurationTicks && (currDurationTicks != lastDurationTicks)) {
- int64_t deltaUs = ((lastDurationTicks - currDurationTicks) * 1000000LL
- + (mTimeScale / 2)) / mTimeScale;
- if (deltaUs > -100 && deltaUs < 100) {
- // use previous ticks, and adjust timestamp as if it was actually that number
- // of ticks
- currDurationTicks = lastDurationTicks;
- timestampUs += deltaUs;
- }
- }
- mStszTableEntries->add(htonl(sampleSize));
- if (mStszTableEntries->count() > 2) {
-
- // Force the first sample to have its own stts entry so that
- // we can adjust its value later to maintain the A/V sync.
- if (mStszTableEntries->count() == 3 || currDurationTicks != lastDurationTicks) {
- addOneSttsTableEntry(sampleCount, lastDurationTicks);
- sampleCount = 1;
- } else {
- ++sampleCount;
+ if (mOwner->isRealTimeRecording()) {
+ if (mIsAudio) {
+ updateDriftTime(meta_data);
+ }
}
- }
- if (mSamplesHaveSameSize) {
- if (mStszTableEntries->count() >= 2 && previousSampleSize != sampleSize) {
- mSamplesHaveSameSize = false;
+ if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
}
- previousSampleSize = sampleSize;
- }
- ALOGV("%s timestampUs/lastTimestampUs: %" PRId64 "/%" PRId64,
- trackName, timestampUs, lastTimestampUs);
- lastDurationUs = timestampUs - lastTimestampUs;
- lastDurationTicks = currDurationTicks;
- lastTimestampUs = timestampUs;
- if (isSync != 0) {
- addOneStssTableEntry(mStszTableEntries->count());
- }
-
- if (mTrackingProgressStatus) {
- if (mPreviousTrackTimeUs <= 0) {
- mPreviousTrackTimeUs = mStartTimestampUs;
+ ALOGV("%s media time stamp: %" PRId64 " and previous paused duration %" PRId64,
+ trackName, timestampUs, previousPausedDurationUs);
+ if (timestampUs > mTrackDurationUs) {
+ mTrackDurationUs = timestampUs;
}
- trackProgressStatus(timestampUs);
+
+ // We need to use the time scale based ticks, rather than the
+ // timestamp itself to determine whether we have to use a new
+ // stts entry, since we may have rounding errors.
+ // The calculation is intended to reduce the accumulated
+ // rounding errors.
+ currDurationTicks =
+ ((timestampUs * mTimeScale + 500000LL) / 1000000LL -
+ (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
+ if (currDurationTicks < 0ll) {
+ ALOGE("do not support out of order frames (timestamp: %lld < last: %lld for %s track",
+ (long long)timestampUs, (long long)lastTimestampUs, trackName);
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ // if the duration is different for this sample, see if it is close enough to the previous
+ // duration that we can fudge it and use the same value, to avoid filling the stts table
+ // with lots of near-identical entries.
+ // "close enough" here means that the current duration needs to be adjusted by less
+ // than 0.1 milliseconds
+ if (lastDurationTicks && (currDurationTicks != lastDurationTicks)) {
+ int64_t deltaUs = ((lastDurationTicks - currDurationTicks) * 1000000LL
+ + (mTimeScale / 2)) / mTimeScale;
+ if (deltaUs > -100 && deltaUs < 100) {
+ // use previous ticks, and adjust timestamp as if it was actually that number
+ // of ticks
+ currDurationTicks = lastDurationTicks;
+ timestampUs += deltaUs;
+ }
+ }
+ mStszTableEntries->add(htonl(sampleSize));
+ if (mStszTableEntries->count() > 2) {
+
+ // Force the first sample to have its own stts entry so that
+ // we can adjust its value later to maintain the A/V sync.
+ if (mStszTableEntries->count() == 3 || currDurationTicks != lastDurationTicks) {
+ addOneSttsTableEntry(sampleCount, lastDurationTicks);
+ sampleCount = 1;
+ } else {
+ ++sampleCount;
+ }
+
+ }
+ if (mSamplesHaveSameSize) {
+ if (mStszTableEntries->count() >= 2 && previousSampleSize != sampleSize) {
+ mSamplesHaveSameSize = false;
+ }
+ previousSampleSize = sampleSize;
+ }
+ ALOGV("%s timestampUs/lastTimestampUs: %" PRId64 "/%" PRId64,
+ trackName, timestampUs, lastTimestampUs);
+ lastDurationUs = timestampUs - lastTimestampUs;
+ lastDurationTicks = currDurationTicks;
+ lastTimestampUs = timestampUs;
+
+ if (isSync != 0) {
+ addOneStssTableEntry(mStszTableEntries->count());
+ }
+
+ if (mTrackingProgressStatus) {
+ if (mPreviousTrackTimeUs <= 0) {
+ mPreviousTrackTimeUs = mStartTimestampUs;
+ }
+ trackProgressStatus(timestampUs);
+ }
}
if (!hasMultipleTracks) {
size_t bytesWritten;
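The duration bookkeeping above converts both timestamps to timescale ticks before subtracting, so per-sample rounding errors do not accumulate, and it coalesces durations that differ by less than 0.1 ms so the stts table is not flooded with near-identical entries. A small standalone sketch of that arithmetic with invented numbers (90 kHz timescale, ~29.97 fps frame spacing):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t timeScale = 90000;                  // ticks per second (typical video timescale)
        int64_t lastTimestampUs = 0, timestampUs = 33366; // ~29.97 fps spacing
        // Same rounding as the writer: convert each timestamp to ticks first, then subtract.
        int64_t currDurationTicks =
                (timestampUs * timeScale + 500000LL) / 1000000LL -
                (lastTimestampUs * timeScale + 500000LL) / 1000000LL;   // 3003 ticks
        int64_t lastDurationTicks = 3002;                               // hypothetical previous duration
        int64_t deltaUs = ((lastDurationTicks - currDurationTicks) * 1000000LL
                + (timeScale / 2)) / timeScale;                         // about -10 us
        // |deltaUs| < 100 us, so the writer would reuse lastDurationTicks and nudge timestampUs.
        printf("ticks=%lld deltaUs=%lld reuse=%d\n",
               (long long)currDurationTicks, (long long)deltaUs,
               (int)(deltaUs > -100 && deltaUs < 100));
        return 0;
    }
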
@@ -4331,9 +4334,12 @@
}
// patch up the mPrimaryItemId and count items with prop associations
+ uint16_t firstVisibleItemId = 0;
for (size_t index = 0; index < mItems.size(); index++) {
if (mItems[index].isPrimary) {
mPrimaryItemId = mItems[index].itemId;
+ } else if (!firstVisibleItemId && !mItems[index].isHidden) {
+ firstVisibleItemId = mItems[index].itemId;
}
if (!mItems[index].properties.empty()) {
@@ -4342,8 +4348,13 @@
}
if (mPrimaryItemId == 0) {
- ALOGW("didn't find primary, using first item");
- mPrimaryItemId = mItems[0].itemId;
+ if (firstVisibleItemId > 0) {
+ ALOGW("didn't find primary, using first visible item");
+ mPrimaryItemId = firstVisibleItemId;
+ } else {
+ ALOGW("no primary and no visible item, using first item");
+ mPrimaryItemId = mItems[0].itemId;
+ }
}
beginBox("meta");
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index a176382..17c9648 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -72,7 +72,7 @@
}
status_t NuMediaExtractor::setDataSource(
- const sp<IMediaHTTPService> &httpService,
+ const sp<MediaHTTPService> &httpService,
const char *path,
const KeyedVector<String8, String8> *headers) {
Mutex::Autolock autoLock(mLock);
diff --git a/media/libstagefright/codec2/Android.bp b/media/libstagefright/codec2/Android.bp
index 2f0deb4..696a062 100644
--- a/media/libstagefright/codec2/Android.bp
+++ b/media/libstagefright/codec2/Android.bp
@@ -7,6 +7,11 @@
srcs: ["C2.cpp"],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+
include_dirs: [
"frameworks/av/media/libstagefright/codec2/include",
"frameworks/native/include/media/hardware",
diff --git a/media/libstagefright/codec2/include/C2Component.h b/media/libstagefright/codec2/include/C2Component.h
index 2dbf7ea..a555b35 100644
--- a/media/libstagefright/codec2/include/C2Component.h
+++ b/media/libstagefright/codec2/include/C2Component.h
@@ -360,6 +360,7 @@
C2DomainKind domain; ///< component domain (e.g. audio or video)
C2ComponentKind type; ///< component type (e.g. encoder, decoder or filter)
C2StringLiteral mediaType; ///< media type supported by the component
+ C2ComponentPriority priority; ///< priority used to determine component ordering
/**
* name alias(es) for backward compatibility.
@@ -569,7 +570,6 @@
*/
virtual std::shared_ptr<C2ComponentInterface> intf() = 0;
-protected:
virtual ~C2Component() = default;
};
@@ -724,11 +724,11 @@
/**
* Returns the list of components supported by this component store.
*
- * This method may be momentarily blocking, but MUST return within 5ms.
+ * This method MUST return within 500ms.
*
* \retval vector of component information.
*/
- virtual std::vector<std::shared_ptr<const C2Component::Traits>> listComponents_sm() const = 0;
+ virtual std::vector<std::shared_ptr<const C2Component::Traits>> listComponents() = 0;
// -------------------------------------- UTILITY METHODS --------------------------------------
diff --git a/media/libstagefright/codec2/vndk/C2Store.cpp b/media/libstagefright/codec2/vndk/C2Store.cpp
index 73ffaea..460cc60 100644
--- a/media/libstagefright/codec2/vndk/C2Store.cpp
+++ b/media/libstagefright/codec2/vndk/C2Store.cpp
@@ -20,12 +20,26 @@
#include <C2Component.h>
#include <C2PlatformSupport.h>
+#define LOG_TAG "C2Store"
+#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <dlfcn.h>
+
#include <map>
#include <memory>
#include <mutex>
namespace android {
+/**
+ * The platform allocator store provides basic allocator-types for the framework based on ion and
+ * gralloc. Allocators are not meant to be updatable.
+ *
+ * \todo Provide allocator based on ashmem
+ * \todo Move ion allocation into its HIDL or provide some mapping from memory usage to ion flags
+ * \todo Make this allocator store extendable
+ */
class C2PlatformAllocatorStore : public C2AllocatorStore {
public:
enum : id_t {
@@ -37,9 +51,11 @@
/* ionmapper */
);
- virtual c2_status_t fetchAllocator(id_t id, std::shared_ptr<C2Allocator> *const allocator) override;
+ virtual c2_status_t fetchAllocator(
+ id_t id, std::shared_ptr<C2Allocator> *const allocator) override;
- virtual std::vector<std::shared_ptr<const C2Allocator::Traits>> listAllocators_nb() const override {
+ virtual std::vector<std::shared_ptr<const C2Allocator::Traits>> listAllocators_nb()
+ const override {
return std::vector<std::shared_ptr<const C2Allocator::Traits>>(); /// \todo
}
@@ -48,10 +64,10 @@
}
private:
- // returns a shared-singleton ion allocator
+ /// returns a shared-singleton ion allocator
std::shared_ptr<C2Allocator> fetchIonAllocator();
- // returns a shared-singleton gralloc allocator
+ /// returns a shared-singleton gralloc allocator
std::shared_ptr<C2Allocator> fetchGrallocAllocator();
};
@@ -141,4 +157,385 @@
return res;
}
-} // namespace android
\ No newline at end of file
+class C2PlatformComponentStore : public C2ComponentStore {
+public:
+ virtual std::vector<std::shared_ptr<const C2Component::Traits>> listComponents() override;
+ virtual std::shared_ptr<C2ParamReflector> getParamReflector() const override;
+ virtual C2String getName() const override;
+ virtual c2_status_t querySupportedValues_nb(
+ std::vector<C2FieldSupportedValuesQuery> &fields) const override;
+ virtual c2_status_t querySupportedParams_nb(
+ std::vector<std::shared_ptr<C2ParamDescriptor>> *const params) const override;
+ virtual c2_status_t query_sm(
+ const std::vector<C2Param *const> &stackParams,
+ const std::vector<C2Param::Index> &heapParamIndices,
+ std::vector<std::unique_ptr<C2Param>> *const heapParams) const override;
+ virtual c2_status_t createInterface(
+ C2String name, std::shared_ptr<C2ComponentInterface> *const interface) override;
+ virtual c2_status_t createComponent(
+ C2String name, std::shared_ptr<C2Component> *const component) override;
+ virtual c2_status_t copyBuffer(
+ std::shared_ptr<C2GraphicBuffer> src, std::shared_ptr<C2GraphicBuffer> dst) override;
+ virtual c2_status_t config_sm(
+ const std::vector<C2Param *const> &params,
+ std::vector<std::unique_ptr<C2SettingResult>> *const failures) override;
+ virtual c2_status_t commit_sm(
+ const std::vector<C2Param *const> &params,
+ std::vector<std::unique_ptr<C2SettingResult>> *const failures) override;
+
+ C2PlatformComponentStore();
+
+ virtual ~C2PlatformComponentStore() override = default;
+
+private:
+
+ /**
+ * An object encapsulating a loaded component module.
+ *
+ * \todo provide a way to add traits to known components here to avoid loading the .so-s
+ * for listComponents
+ */
+ struct ComponentModule : public C2ComponentFactory,
+ public std::enable_shared_from_this<ComponentModule> {
+ virtual c2_status_t createComponent(
+ c2_node_id_t id, std::shared_ptr<C2Component> *component,
+ ComponentDeleter deleter = std::default_delete<C2Component>()) override;
+ virtual c2_status_t createInterface(
+ c2_node_id_t id, std::shared_ptr<C2ComponentInterface> *interface,
+ InterfaceDeleter deleter = std::default_delete<C2ComponentInterface>()) override;
+
+ /**
+ * \returns the traits of the component in this module.
+ */
+ std::shared_ptr<const C2Component::Traits> getTraits();
+
+ /**
+ * Creates an uninitialized component module.
+ *
+ * \note Only used by ComponentLoader.
+ */
+ ComponentModule() : mInit(C2_NO_INIT) {}
+
+ /**
+ * Initializes a component module with a given library path. Must be called exactly once.
+ *
+ * \note Only used by ComponentLoader.
+ *
+ * \param libPath[in] library path (or name)
+ *
+ * \retval C2_OK the component module has been successfully loaded
+ * \retval C2_NO_MEMORY not enough memory to load the component module
+ * \retval C2_NOT_FOUND could not locate the component module
+ * \retval C2_CORRUPTED the component module could not be loaded (unexpected)
+ * \retval C2_REFUSED permission denied to load the component module (unexpected)
+ * \retval C2_TIMED_OUT could not load the module within the time limit (unexpected)
+ */
+ c2_status_t init(std::string libPath);
+
+ virtual ~ComponentModule() override;
+
+ protected:
+ std::recursive_mutex mLock; ///< lock protecting mTraits
+ std::shared_ptr<C2Component::Traits> mTraits; ///< cached component traits
+
+ c2_status_t mInit; ///< initialization result
+
+ void *mLibHandle; ///< loaded library handle
+ C2ComponentFactory::CreateCodec2FactoryFunc createFactory; ///< loaded create function
+ C2ComponentFactory::DestroyCodec2FactoryFunc destroyFactory; ///< loaded destroy function
+ C2ComponentFactory *mComponentFactory; ///< loaded/created component factory
+ };
+
+ /**
+ * An object encapsulating a loadable component module.
+ *
+ * \todo make this also work for enumerations
+ */
+ struct ComponentLoader {
+ /**
+ * Load the component module.
+ *
+ * This method simply returns the component module if it is already loaded, or
+ * attempts to load it if it is not.
+ *
+ * \param module[out] pointer to the shared pointer where the loaded module shall be stored.
+ * This will be nullptr on error.
+ *
+ * \retval C2_OK the component module has been successfully loaded
+ * \retval C2_NO_MEMORY not enough memory to load the component module
+ * \retval C2_NOT_FOUND could not locate the component module
+ * \retval C2_CORRUPTED the component module could not be loaded
+ * \retval C2_REFUSED permission denied to load the component module
+ */
+ c2_status_t fetchModule(std::shared_ptr<ComponentModule> *module) {
+ c2_status_t res = C2_OK;
+ std::lock_guard<std::mutex> lock(mMutex);
+ std::shared_ptr<ComponentModule> localModule = mModule.lock();
+ if (localModule == nullptr) {
+ localModule = std::make_shared<ComponentModule>();
+ res = localModule->init(mLibPath);
+ if (res == C2_OK) {
+ mModule = localModule;
+ }
+ }
+ *module = localModule;
+ return res;
+ }
+
+ /**
+ * Creates a component loader for a specific library path (or name).
+ */
+ ComponentLoader(std::string libPath)
+ : mLibPath(libPath) {}
+
+ private:
+ std::mutex mMutex; ///< mutex guarding the module
+ std::weak_ptr<ComponentModule> mModule; ///< weak reference to the loaded module
+ std::string mLibPath; ///< library path (or name)
+ };
+
+ /**
+ * Retrieves the component loader for a component.
+ *
+ * \return a non-ref-holding pointer to the component loader.
+ *
+ * \retval C2_OK the component loader has been successfully retrieved
+ * \retval C2_NO_MEMORY not enough memory to locate the component loader
+ * \retval C2_NOT_FOUND could not locate the component to be loaded
+ * \retval C2_CORRUPTED the component loader could not be identified due to some modules being
+ * corrupted (this can happen if the name does not refer to an already
+ * identified component but some components could not be loaded due to
+ * bad library)
+ * \retval C2_REFUSED permission denied to find the component loader for the named component
+ * (this can happen if the name does not refer to an already identified
+ * component but some components could not be loaded due to lack of
+ * permissions)
+ */
+ c2_status_t findComponent(C2String name, ComponentLoader **loader);
+
+ std::map<C2String, ComponentLoader> mComponents; ///< list of components
+};
+
+c2_status_t C2PlatformComponentStore::ComponentModule::init(std::string libPath) {
+ ALOGV("in %s", __func__);
+ ALOGV("loading dll");
+ mLibHandle = dlopen(libPath.c_str(), RTLD_NOW|RTLD_NODELETE);
+ if (mLibHandle == nullptr) {
+ // could be access/symbol or simply not being there
+ ALOGD("could not dlopen %s: %s", libPath.c_str(), dlerror());
+ mInit = C2_CORRUPTED;
+ } else {
+ createFactory =
+ (C2ComponentFactory::CreateCodec2FactoryFunc)dlsym(mLibHandle, "CreateCodec2Factory");
+ destroyFactory =
+ (C2ComponentFactory::DestroyCodec2FactoryFunc)dlsym(mLibHandle, "DestroyCodec2Factory");
+
+ mComponentFactory = createFactory();
+ if (mComponentFactory == nullptr) {
+ ALOGD("could not create factory in %s", libPath.c_str());
+ mInit = C2_NO_MEMORY;
+ } else {
+ mInit = C2_OK;
+ }
+ }
+ return mInit;
+}
+
+C2PlatformComponentStore::ComponentModule::~ComponentModule() {
+ ALOGV("in %s", __func__);
+ if (destroyFactory && mComponentFactory) {
+ destroyFactory(mComponentFactory);
+ }
+ if (mLibHandle) {
+ ALOGV("unloading dll");
+ dlclose(mLibHandle);
+ }
+}
+
+c2_status_t C2PlatformComponentStore::ComponentModule::createInterface(
+ c2_node_id_t id, std::shared_ptr<C2ComponentInterface> *interface,
+ std::function<void(::android::C2ComponentInterface*)> deleter) {
+ interface->reset();
+ if (mInit != C2_OK) {
+ return mInit;
+ }
+ std::shared_ptr<ComponentModule> module = shared_from_this();
+ c2_status_t res = mComponentFactory->createInterface(
+ id, interface, [module, deleter](C2ComponentInterface *p) mutable {
+ // capture module so that we ensure we still have it while deleting interface
+ deleter(p); // delete interface first
+ module.reset(); // remove module ref (not technically needed)
+ });
+ return res;
+}
+
+c2_status_t C2PlatformComponentStore::ComponentModule::createComponent(
+ c2_node_id_t id, std::shared_ptr<C2Component> *component,
+ std::function<void(::android::C2Component*)> deleter) {
+ component->reset();
+ if (mInit != C2_OK) {
+ return mInit;
+ }
+ std::shared_ptr<ComponentModule> module = shared_from_this();
+ c2_status_t res = mComponentFactory->createComponent(
+ id, component, [module, deleter](C2Component *p) mutable {
+ // capture module so that we ensure we still have it while deleting component
+ deleter(p); // delete component first
+ module.reset(); // remove module ref (not technically needed)
+ });
+ return res;
+}
+
+std::shared_ptr<const C2Component::Traits> C2PlatformComponentStore::ComponentModule::getTraits() {
+ std::unique_lock<std::recursive_mutex> lock(mLock);
+ if (!mTraits) {
+ std::shared_ptr<C2ComponentInterface> intf;
+ c2_status_t res = createInterface(0, &intf);
+ if (res != C2_OK) {
+ return nullptr;
+ }
+
+ std::shared_ptr<C2Component::Traits> traits(new (std::nothrow) C2Component::Traits);
+ if (traits) {
+ // traits->name = intf->getName();
+ }
+
+ mTraits = traits;
+ }
+ return mTraits;
+}
+
+C2PlatformComponentStore::C2PlatformComponentStore() {
+ // TODO: move this also into a .so so it can be updated
+ mComponents.emplace("c2.google.avc.decoder", "libstagefright_soft_c2avcdec.so");
+}
+
+c2_status_t C2PlatformComponentStore::copyBuffer(
+ std::shared_ptr<C2GraphicBuffer> src, std::shared_ptr<C2GraphicBuffer> dst) {
+ (void)src;
+ (void)dst;
+ return C2_OMITTED;
+}
+
+c2_status_t C2PlatformComponentStore::query_sm(
+ const std::vector<C2Param *const> &stackParams,
+ const std::vector<C2Param::Index> &heapParamIndices,
+ std::vector<std::unique_ptr<C2Param>> *const heapParams) const {
+ // there are no supported configs
+ (void)heapParams;
+ return stackParams.empty() && heapParamIndices.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+c2_status_t C2PlatformComponentStore::config_sm(
+ const std::vector<C2Param *const> &params,
+ std::vector<std::unique_ptr<C2SettingResult>> *const failures) {
+ // there are no supported configs
+ (void)failures;
+ return params.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+c2_status_t C2PlatformComponentStore::commit_sm(
+ const std::vector<C2Param *const> &params,
+ std::vector<std::unique_ptr<C2SettingResult>> *const failures) {
+ // there are no supported configs
+ (void)failures;
+ return params.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+std::vector<std::shared_ptr<const C2Component::Traits>> C2PlatformComponentStore::listComponents() {
+ // This method SHALL return within 500ms.
+ std::vector<std::shared_ptr<const C2Component::Traits>> list;
+ for (auto &it : mComponents) {
+ ComponentLoader &loader = it.second;
+ std::shared_ptr<ComponentModule> module;
+ c2_status_t res = loader.fetchModule(&module);
+ if (res == C2_OK) {
+ std::shared_ptr<const C2Component::Traits> traits = module->getTraits();
+ if (traits) {
+ list.push_back(traits);
+ }
+ }
+ }
+ return list;
+}
+
+c2_status_t C2PlatformComponentStore::findComponent(C2String name, ComponentLoader **loader) {
+ *loader = nullptr;
+ auto pos = mComponents.find(name);
+ // TODO: check aliases
+ if (pos == mComponents.end()) {
+ return C2_NOT_FOUND;
+ }
+ *loader = &pos->second;
+ return C2_OK;
+}
+
+c2_status_t C2PlatformComponentStore::createComponent(
+ C2String name, std::shared_ptr<C2Component> *const component) {
+ // This method SHALL return within 100ms.
+ component->reset();
+ ComponentLoader *loader;
+ c2_status_t res = findComponent(name, &loader);
+ if (res == C2_OK) {
+ std::shared_ptr<ComponentModule> module;
+ res = loader->fetchModule(&module);
+ if (res == C2_OK) {
+ // TODO: get a unique node ID
+ res = module->createComponent(0, component);
+ }
+ }
+ return res;
+}
+
+c2_status_t C2PlatformComponentStore::createInterface(
+ C2String name, std::shared_ptr<C2ComponentInterface> *const interface) {
+ // This method SHALL return within 100ms.
+ interface->reset();
+ ComponentLoader *loader;
+ c2_status_t res = findComponent(name, &loader);
+ if (res == C2_OK) {
+ std::shared_ptr<ComponentModule> module;
+ res = loader->fetchModule(&module);
+ if (res == C2_OK) {
+ // TODO: get a unique node ID
+ res = module->createInterface(0, interface);
+ }
+ }
+ return res;
+}
+
+c2_status_t C2PlatformComponentStore::querySupportedParams_nb(
+ std::vector<std::shared_ptr<C2ParamDescriptor>> *const params) const {
+ // there are no supported config params
+ (void)params;
+ return C2_OK;
+}
+
+c2_status_t C2PlatformComponentStore::querySupportedValues_nb(
+ std::vector<C2FieldSupportedValuesQuery> &fields) const {
+ // there are no supported config params
+ return fields.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+C2String C2PlatformComponentStore::getName() const {
+ return "android.componentStore.platform";
+}
+
+std::shared_ptr<C2ParamReflector> C2PlatformComponentStore::getParamReflector() const {
+ // TODO
+ return nullptr;
+}
+
+std::shared_ptr<C2ComponentStore> GetCodec2PlatformComponentStore() {
+ static std::mutex mutex;
+ static std::weak_ptr<C2ComponentStore> platformStore;
+ std::lock_guard<std::mutex> lock(mutex);
+ std::shared_ptr<C2ComponentStore> store = platformStore.lock();
+ if (store == nullptr) {
+ store = std::make_shared<C2PlatformComponentStore>();
+ platformStore = store;
+ }
+ return store;
+}
+
+} // namespace android
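Both ComponentLoader::fetchModule() and GetCodec2PlatformComponentStore() above rely on the same idiom: cache a weak_ptr, promote it under a lock, and re-create (or reload) the object only when every previous user has dropped it. A stripped-down sketch of that idiom (names are illustrative):

    #include <memory>
    #include <mutex>

    struct Module { /* stands in for a loaded component module */ };

    std::shared_ptr<Module> fetchModule() {
        static std::mutex mutex;
        static std::weak_ptr<Module> cached;     // does not keep the module alive by itself
        std::lock_guard<std::mutex> lock(mutex);
        std::shared_ptr<Module> module = cached.lock();  // promote if still alive
        if (module == nullptr) {
            module = std::make_shared<Module>(); // (re)load on demand
            cached = module;
        }
        return module;                           // callers share ownership; the last one unloads
    }
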
diff --git a/media/libstagefright/codec2/vndk/include/C2PlatformSupport.h b/media/libstagefright/codec2/vndk/include/C2PlatformSupport.h
index 8e45705..2281dab 100644
--- a/media/libstagefright/codec2/vndk/include/C2PlatformSupport.h
+++ b/media/libstagefright/codec2/vndk/include/C2PlatformSupport.h
@@ -19,6 +19,7 @@
#include <C2Component.h>
+#include <functional>
#include <memory>
namespace android {
@@ -64,14 +65,17 @@
*/
class C2ComponentFactory {
public:
+ typedef std::function<void(::android::C2Component*)> ComponentDeleter;
+ typedef std::function<void(::android::C2ComponentInterface*)> InterfaceDeleter;
+
/**
* Creates a component.
*
* This method SHALL return within 100ms.
*
+ * \param id component ID for the created component
* \param component shared pointer where the created component is stored. Cleared on
* failure and updated on success.
- * \param id component ID for the created component
*
* \retval C2_OK the component was created successfully
* \retval C2_TIMED_OUT could not create the component within the time limit (unexpected)
@@ -80,16 +84,17 @@
* \retval C2_NO_MEMORY not enough memory to create the component
*/
virtual c2_status_t createComponent(
- std::shared_ptr<C2Component>* const component, c2_node_id_t id) = 0;
+ c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+ ComponentDeleter deleter = std::default_delete<C2Component>()) = 0;
/**
* Creates a component interface.
*
* This method SHALL return within 100ms.
*
+ * \param id component interface ID for the created interface
* \param interface shared pointer where the created interface is stored. Cleared on
* failure and updated on success.
- * \param id component interface ID for the created interface
*
* \retval C2_OK the component interface was created successfully
* \retval C2_TIMED_OUT could not create the component interface within the time limit
@@ -100,11 +105,22 @@
* \retval C2_NO_MEMORY not enough memory to create the component interface
*/
virtual c2_status_t createInterface(
- std::shared_ptr<C2ComponentInterface>* const interface, c2_node_id_t id) = 0;
+ c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+ InterfaceDeleter deleter = std::default_delete<C2ComponentInterface>()) = 0;
virtual ~C2ComponentFactory() = default;
+
+ typedef ::android::C2ComponentFactory* (*CreateCodec2FactoryFunc)(void);
+ typedef void (*DestroyCodec2FactoryFunc)(::android::C2ComponentFactory*);
};
+/**
+ * Returns the platform component store.
+ * \retval nullptr if the platform component store could not be obtained
+ */
+std::shared_ptr<C2ComponentStore> GetCodec2PlatformComponentStore();
+
+
} // namespace android
#endif // STAGEFRIGHT_CODEC2_PLATFORM_SUPPORT_H_
diff --git a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp
index b904d26..2423629 100644
--- a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp
+++ b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp
@@ -261,7 +261,7 @@
mFrameRate(0u, 0),
mBlocksPerSecond(0u, 0),
mParamReflector(new ParamReflector) {
-
+ ALOGV("in %s", __func__);
mInputPortMime = C2PortMimeConfig::input::alloc_unique(strlen(CODEC_MIME_TYPE) + 1);
strcpy(mInputPortMime->m.mValue, CODEC_MIME_TYPE);
mOutputPortMime = C2PortMimeConfig::output::alloc_unique(strlen(MEDIA_MIMETYPE_VIDEO_RAW) + 1);
@@ -430,6 +430,10 @@
false, "_output_block_pools", mOutputBlockPools.get()));
}
+C2SoftAvcDecIntf::~C2SoftAvcDecIntf() {
+ ALOGV("in %s", __func__);
+}
+
C2String C2SoftAvcDecIntf::getName() const {
return mName;
}
@@ -653,6 +657,7 @@
mWidth(320),
mHeight(240),
mInputOffset(0) {
+ ALOGV("in %s", __func__);
GETTIME(&mTimeStart, NULL);
// If input dump is enabled, then open create an empty file
@@ -661,6 +666,7 @@
}
C2SoftAvcDec::~C2SoftAvcDec() {
+ ALOGV("in %s", __func__);
CHECK_EQ(deInitDecoder(), (status_t)OK);
}
@@ -1505,14 +1511,17 @@
class C2SoftAvcDecFactory : public C2ComponentFactory {
public:
virtual c2_status_t createComponent(
- std::shared_ptr<C2Component>* const component, c2_node_id_t id) override {
- *component = std::make_shared<C2SoftAvcDec>("avc", id);
+ c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+ std::function<void(::android::C2Component*)> deleter) override {
+ *component = std::shared_ptr<C2Component>(new C2SoftAvcDec("avc", id), deleter);
return C2_OK;
}
virtual c2_status_t createInterface(
- std::shared_ptr<C2ComponentInterface>* const interface, c2_node_id_t id) override {
- *interface = std::make_shared<C2SoftAvcDecIntf>("avc", id);
+ c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+ std::function<void(::android::C2ComponentInterface*)> deleter) override {
+ *interface =
+ std::shared_ptr<C2ComponentInterface>(new C2SoftAvcDecIntf("avc", id), deleter);
return C2_OK;
}
@@ -1522,9 +1531,11 @@
} // namespace android
extern "C" ::android::C2ComponentFactory* CreateCodec2Factory() {
+ ALOGV("in %s", __func__);
return new ::android::C2SoftAvcDecFactory();
}
extern "C" void DestroyCodec2Factory(::android::C2ComponentFactory* factory) {
+ ALOGV("in %s", __func__);
delete factory;
}
diff --git a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h
index 91ec003..28f1dfd 100644
--- a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h
+++ b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h
@@ -77,7 +77,7 @@
};
C2SoftAvcDecIntf(const char *name, c2_node_id_t id);
- virtual ~C2SoftAvcDecIntf() = default;
+ virtual ~C2SoftAvcDecIntf() override;
// From C2ComponentInterface
virtual C2String getName() const override;
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
index 358c743..32fdbd3 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
@@ -1170,6 +1170,12 @@
ps_inp_raw_buf->e_color_fmt = mIvVideoColorFormat;
source = NULL;
if ((inputBufferHeader != NULL) && inputBufferHeader->nFilledLen) {
+ OMX_ERRORTYPE error = validateInputBuffer(inputBufferHeader);
+ if (error != OMX_ErrorNone) {
+ ALOGE("b/69065651");
+ android_errorWriteLog(0x534e4554, "69065651");
+ return error;
+ }
source = inputBufferHeader->pBuffer + inputBufferHeader->nOffset;
if (mInputDataIsMeta) {
diff --git a/media/libstagefright/codecs/cmds/Android.bp b/media/libstagefright/codecs/cmds/Android.bp
index e44e53c..ad0bd2d 100644
--- a/media/libstagefright/codecs/cmds/Android.bp
+++ b/media/libstagefright/codecs/cmds/Android.bp
@@ -22,7 +22,6 @@
"libstagefright",
"libstagefright_codec2",
"libstagefright_foundation",
- "libstagefright_soft_c2avcdec",
"libui",
"libutils",
],
diff --git a/media/libstagefright/codecs/cmds/codec2.cpp b/media/libstagefright/codecs/cmds/codec2.cpp
index 8e2c4b9..1972a7a 100644
--- a/media/libstagefright/codecs/cmds/codec2.cpp
+++ b/media/libstagefright/codecs/cmds/codec2.cpp
@@ -211,10 +211,9 @@
return;
}
- std::unique_ptr<C2ComponentFactory> factory(CreateCodec2Factory());
+ std::shared_ptr<C2ComponentStore> store = GetCodec2PlatformComponentStore();
std::shared_ptr<C2Component> component;
- (void)factory->createComponent(&component, 0);
- DestroyCodec2Factory(factory.release());
+ (void)store->createComponent("c2.google.avc.decoder", &component);
(void)component->setListener_sm(mListener);
std::unique_ptr<C2PortBlockPoolsTuning::output> pools =
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
index 7b90a01..f6a7b0e 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
@@ -434,6 +434,14 @@
}
if (inHeader->nFilledLen > 0) {
+ OMX_ERRORTYPE error = validateInputBuffer(inHeader);
+ if (error != OMX_ErrorNone) {
+ ALOGE("b/69065651");
+ android_errorWriteLog(0x534e4554, "69065651");
+ mSignalledError = true;
+ notify(OMX_EventError, error, 0, 0);
+ return;
+ }
const uint8_t *inputData = NULL;
if (mInputDataIsMeta) {
inputData =
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index a5666da..f6257b1 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -653,6 +653,13 @@
return;
}
+ OMX_ERRORTYPE error = validateInputBuffer(inputBufferHeader);
+ if (error != OMX_ErrorNone) {
+ ALOGE("b/27569635");
+ android_errorWriteLog(0x534e4554, "27569635");
+ notify(OMX_EventError, error, 0, 0);
+ return;
+ }
const uint8_t *source =
inputBufferHeader->pBuffer + inputBufferHeader->nOffset;
@@ -668,14 +675,6 @@
return;
}
} else {
- if (inputBufferHeader->nFilledLen < frameSize) {
- android_errorWriteLog(0x534e4554, "27569635");
- notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
- return;
- } else if (inputBufferHeader->nFilledLen > frameSize) {
- ALOGW("Input buffer contains too many pixels");
- }
-
if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
ConvertYUV420SemiPlanarToYUV420Planar(
source, mConversionBuffer, mWidth, mHeight);
diff --git a/media/libstagefright/http/MediaHTTP.cpp b/media/libstagefright/http/MediaHTTP.cpp
index 5b18814..84837e8 100644
--- a/media/libstagefright/http/MediaHTTP.cpp
+++ b/media/libstagefright/http/MediaHTTP.cpp
@@ -25,11 +25,11 @@
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/Utils.h>
-#include <media/IMediaHTTPConnection.h>
+#include <media/MediaHTTPConnection.h>
namespace android {
-MediaHTTP::MediaHTTP(const sp<IMediaHTTPConnection> &conn)
+MediaHTTP::MediaHTTP(const sp<MediaHTTPConnection> &conn)
: mInitCheck((conn != NULL) ? OK : NO_INIT),
mHTTPConnection(conn),
mCachedSizeValid(false),
diff --git a/media/libstagefright/httplive/HTTPDownloader.cpp b/media/libstagefright/httplive/HTTPDownloader.cpp
index 3fef764..72604e3 100644
--- a/media/libstagefright/httplive/HTTPDownloader.cpp
+++ b/media/libstagefright/httplive/HTTPDownloader.cpp
@@ -22,8 +22,8 @@
#include "M3UParser.h"
#include <media/DataSource.h>
-#include <media/IMediaHTTPConnection.h>
-#include <media/IMediaHTTPService.h>
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaHTTP.h>
@@ -36,7 +36,7 @@
namespace android {
HTTPDownloader::HTTPDownloader(
- const sp<IMediaHTTPService> &httpService,
+ const sp<MediaHTTPService> &httpService,
const KeyedVector<String8, String8> &headers) :
mHTTPDataSource(new MediaHTTP(httpService->makeHTTPConnection())),
mExtraHeaders(headers),
diff --git a/media/libstagefright/httplive/HTTPDownloader.h b/media/libstagefright/httplive/HTTPDownloader.h
index 1db4a48..0d4bd31 100644
--- a/media/libstagefright/httplive/HTTPDownloader.h
+++ b/media/libstagefright/httplive/HTTPDownloader.h
@@ -28,12 +28,12 @@
struct ABuffer;
class DataSource;
struct HTTPBase;
-struct IMediaHTTPService;
+struct MediaHTTPService;
struct M3UParser;
struct HTTPDownloader : public RefBase {
HTTPDownloader(
- const sp<IMediaHTTPService> &httpService,
+ const sp<MediaHTTPService> &httpService,
const KeyedVector<String8, String8> &headers);
void reconnect();
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 4c2e0d4..1e2e684 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -26,7 +26,7 @@
#include "mpeg2ts/AnotherPacketSource.h"
#include <cutils/properties.h>
-#include <media/IMediaHTTPService.h>
+#include <media/MediaHTTPService.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -274,7 +274,7 @@
LiveSession::LiveSession(
const sp<AMessage> &notify, uint32_t flags,
- const sp<IMediaHTTPService> &httpService)
+ const sp<MediaHTTPService> &httpService)
: mNotify(notify),
mFlags(flags),
mHTTPService(httpService),
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index abf8cf0..7a6d487 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -33,7 +33,7 @@
struct AnotherPacketSource;
class DataSource;
struct HTTPBase;
-struct IMediaHTTPService;
+struct MediaHTTPService;
struct LiveDataSource;
struct M3UParser;
struct PlaylistFetcher;
@@ -71,7 +71,7 @@
LiveSession(
const sp<AMessage> &notify,
uint32_t flags,
- const sp<IMediaHTTPService> &httpService);
+ const sp<MediaHTTPService> &httpService);
void setBufferingSettings(const BufferingSettings &buffering);
@@ -187,7 +187,7 @@
sp<AMessage> mNotify;
uint32_t mFlags;
- sp<IMediaHTTPService> mHTTPService;
+ sp<MediaHTTPService> mHTTPService;
bool mBuffering;
bool mInPreparationPhase;
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 71d625f..bc3e57c 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -898,6 +898,9 @@
}
}
+ if (meta->get() == NULL) {
+ return ERROR_MALFORMED;
+ }
return OK;
}
diff --git a/media/libstagefright/include/SDPLoader.h b/media/libstagefright/include/SDPLoader.h
index 2c4f543..b901c97 100644
--- a/media/libstagefright/include/SDPLoader.h
+++ b/media/libstagefright/include/SDPLoader.h
@@ -25,7 +25,7 @@
namespace android {
struct HTTPBase;
-struct IMediaHTTPService;
+struct MediaHTTPService;
struct SDPLoader : public AHandler {
enum Flags {
@@ -38,7 +38,7 @@
SDPLoader(
const sp<AMessage> &notify,
uint32_t flags,
- const sp<IMediaHTTPService> &httpService);
+ const sp<MediaHTTPService> &httpService);
void load(const char* url, const KeyedVector<String8, String8> *headers);
diff --git a/media/libstagefright/include/media/stagefright/DataSourceFactory.h b/media/libstagefright/include/media/stagefright/DataSourceFactory.h
index 89add13..2a1d491 100644
--- a/media/libstagefright/include/media/stagefright/DataSourceFactory.h
+++ b/media/libstagefright/include/media/stagefright/DataSourceFactory.h
@@ -23,20 +23,20 @@
namespace android {
-struct IMediaHTTPService;
+struct MediaHTTPService;
class String8;
struct HTTPBase;
class DataSourceFactory {
public:
static sp<DataSource> CreateFromURI(
- const sp<IMediaHTTPService> &httpService,
+ const sp<MediaHTTPService> &httpService,
const char *uri,
const KeyedVector<String8, String8> *headers = NULL,
String8 *contentType = NULL,
HTTPBase *httpSource = NULL);
- static sp<DataSource> CreateMediaHTTP(const sp<IMediaHTTPService> &httpService);
+ static sp<DataSource> CreateMediaHTTP(const sp<MediaHTTPService> &httpService);
static sp<DataSource> CreateFromFd(int fd, int64_t offset, int64_t length);
};
diff --git a/media/libstagefright/include/media/stagefright/MediaHTTP.h b/media/libstagefright/include/media/stagefright/MediaHTTP.h
index 006d8d8..94a2ecd 100644
--- a/media/libstagefright/include/media/stagefright/MediaHTTP.h
+++ b/media/libstagefright/include/media/stagefright/MediaHTTP.h
@@ -24,10 +24,10 @@
namespace android {
-struct IMediaHTTPConnection;
+struct MediaHTTPConnection;
struct MediaHTTP : public HTTPBase {
- MediaHTTP(const sp<IMediaHTTPConnection> &conn);
+ MediaHTTP(const sp<MediaHTTPConnection> &conn);
virtual status_t connect(
const char *uri,
@@ -56,7 +56,7 @@
private:
status_t mInitCheck;
- sp<IMediaHTTPConnection> mHTTPConnection;
+ sp<MediaHTTPConnection> mHTTPConnection;
KeyedVector<String8, String8> mLastHeaders;
AString mLastURI;
diff --git a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
index 5af0745..eed0f05 100644
--- a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
@@ -34,7 +34,7 @@
struct ABuffer;
struct AMessage;
class DataSource;
-struct IMediaHTTPService;
+struct MediaHTTPService;
class MediaBuffer;
class MediaExtractor;
struct MediaSource;
@@ -54,7 +54,7 @@
NuMediaExtractor();
status_t setDataSource(
- const sp<IMediaHTTPService> &httpService,
+ const sp<MediaHTTPService> &httpService,
const char *path,
const KeyedVector<String8, String8> *headers = NULL);
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index a70005e..f331dbb 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -46,6 +46,36 @@
namespace android {
+namespace {
+// kTimestampFluctuation is an upper bound of timestamp fluctuation from the
+// source that GraphicBufferSource allows. The unit of kTimestampFluctuation is
+// frames. More specifically, GraphicBufferSource will drop a frame if
+//
+// expectedNewFrameTimestamp - actualNewFrameTimestamp <
+// (0.5 - kTimestampFluctuation) * expectedTimePeriodBetweenFrames
+//
+// where
+// - expectedNewFrameTimestamp is the calculated ideal timestamp of the new
+// incoming frame
+// - actualNewFrameTimestamp is the timestamp received from the source
+// - expectedTimePeriodBetweenFrames is the ideal difference of the timestamps
+// of two adjacent frames
+//
+// See GraphicBufferSource::calculateCodecTimestamp_l() for more detail about
+// how kTimestampFluctuation is used.
+//
+// kTimestampFluctuation should be non-negative. A higher value causes a smaller
+// chance of dropping frames, but at the same time a higher bound on the
+// difference between the source timestamp and the interpreted (snapped)
+// timestamp.
+//
+// The value of 0.05 means that GraphicBufferSource expects the input timestamps
+// to fluctuate no more than 5% from the regular time period.
+//
+// TODO: Justify the choice of this value, or make it configurable.
+constexpr double kTimestampFluctuation = 0.05;
+}
+
/**
* A copiable object managing a buffer in the buffer cache managed by the producer. This object
* holds a reference to the buffer, and maintains which buffer slot it belongs to (if any), and
@@ -732,14 +762,16 @@
mFrameCount = 0;
} else {
// snap to nearest capture point
- int64_t nFrames = std::llround(
- (timeUs - mPrevCaptureUs) * mCaptureFps / 1000000);
- if (nFrames <= 0) {
+ double nFrames = (timeUs - mPrevCaptureUs) * mCaptureFps / 1000000;
+ if (nFrames < 0.5 - kTimestampFluctuation) {
// skip this frame as it's too close to previous capture
ALOGV("skipping frame, timeUs %lld", static_cast<long long>(timeUs));
return false;
}
- mFrameCount += nFrames;
+ if (nFrames <= 1.0) {
+ nFrames = 1.0;
+ }
+ mFrameCount += std::llround(nFrames);
mPrevCaptureUs = mBaseCaptureUs + std::llround(
mFrameCount * 1000000 / mCaptureFps);
mPrevFrameUs = mBaseFrameUs + std::llround(
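For kTimestampFluctuation = 0.05 and a 30 fps capture rate the period is 33333 us, so a new frame is now dropped only if it arrives less than (0.5 - 0.05) * 33333 ≈ 15000 us after the previous capture, whereas the old llround()-based test dropped anything that arrived up to half a period early. A standalone sketch of the new check with invented inputs:

    #include <cmath>
    #include <cstdio>

    int main() {
        const double kTimestampFluctuation = 0.05;
        const double captureFps = 30.0;
        long long prevCaptureUs = 0;
        long long timeUs = 14000;   // new frame only 14 ms after the previous capture
        double nFrames = (timeUs - prevCaptureUs) * captureFps / 1000000;  // 0.42 frame periods
        if (nFrames < 0.5 - kTimestampFluctuation) {
            printf("dropped: too close to previous capture\n");  // 0.42 < 0.45
            return 0;
        }
        if (nFrames <= 1.0) {
            nFrames = 1.0;          // clamp: never snap a kept frame backwards in time
        }
        printf("advance frame count by %lld\n", (long long)std::llround(nFrames));
        return 0;
    }
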
diff --git a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
index fa15ab3..2fbbb44 100644
--- a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
@@ -664,4 +664,17 @@
return SimpleSoftOMXComponent::getExtensionIndex(name, index);
}
+OMX_ERRORTYPE SoftVideoEncoderOMXComponent::validateInputBuffer(
+ const OMX_BUFFERHEADERTYPE *inputBufferHeader) {
+ size_t frameSize = mInputDataIsMeta ?
+ max(sizeof(VideoNativeMetadata), sizeof(VideoGrallocMetadata))
+ : mWidth * mHeight * 3 / 2;
+ if (inputBufferHeader->nFilledLen < frameSize) {
+ return OMX_ErrorUndefined;
+ } else if (inputBufferHeader->nFilledLen > frameSize) {
+ ALOGW("Input buffer contains more data than expected.");
+ }
+ return OMX_ErrorNone;
+}
+
} // namespace android
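The new shared validateInputBuffer() used by the encoder changes earlier in this patch checks that a byte-buffer input carries at least a full YUV 4:2:0 frame (width * height * 3 / 2 bytes), while a metadata-mode input must carry at least a VideoNativeMetadata/VideoGrallocMetadata struct. A worked example of the byte-buffer case (resolution chosen for illustration):

    #include <cstddef>
    #include <cstdio>

    // For a 1280x720 YUV 4:2:0 frame: one full-resolution luma plane plus two
    // quarter-resolution chroma planes.
    int main() {
        const size_t width = 1280, height = 720;
        const size_t lumaBytes = width * height;                    // 921600
        const size_t chromaBytes = 2 * (width / 2) * (height / 2);  // 460800
        const size_t frameSize = width * height * 3 / 2;            // 1382400 = luma + chroma
        printf("expected nFilledLen >= %zu (%zu + %zu)\n", frameSize, lumaBytes, chromaBytes);
        return 0;
    }
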
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
index db5496a..2d6f31b 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
@@ -67,6 +67,8 @@
virtual OMX_ERRORTYPE getExtensionIndex(const char *name, OMX_INDEXTYPE *index);
+ OMX_ERRORTYPE validateInputBuffer(const OMX_BUFFERHEADERTYPE *inputBufferHeader);
+
enum {
kInputPortIndex = 0,
kOutputPortIndex = 1,
diff --git a/media/libstagefright/rtsp/SDPLoader.cpp b/media/libstagefright/rtsp/SDPLoader.cpp
index 0f46c83..d459cbd 100644
--- a/media/libstagefright/rtsp/SDPLoader.cpp
+++ b/media/libstagefright/rtsp/SDPLoader.cpp
@@ -22,8 +22,8 @@
#include "ASessionDescription.h"
-#include <media/IMediaHTTPConnection.h>
-#include <media/IMediaHTTPService.h>
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
#include <media/stagefright/MediaHTTP.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -36,7 +36,7 @@
SDPLoader::SDPLoader(
const sp<AMessage> &notify,
uint32_t flags,
- const sp<IMediaHTTPService> &httpService)
+ const sp<MediaHTTPService> &httpService)
: mNotify(notify),
mFlags(flags),
mNetLooper(new ALooper),
diff --git a/media/mtp/MtpDatabase.h b/media/mtp/MtpDatabase.h
index 2395f4f..f3f9720 100644
--- a/media/mtp/MtpDatabase.h
+++ b/media/mtp/MtpDatabase.h
@@ -45,6 +45,8 @@
MtpObjectFormat format,
bool succeeded) = 0;
+ virtual void doScanDirectory(const char* path) = 0;
+
virtual MtpObjectHandleList* getObjectList(MtpStorageID storageID,
MtpObjectFormat format,
MtpObjectHandle parent) = 0;
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 6080868..bb0414d 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -1148,6 +1148,7 @@
ALOGV("Copying file from %s to %s", (const char*)fromPath, (const char*)path);
if (format == MTP_FORMAT_ASSOCIATION) {
int ret = makeFolder((const char *)path);
+ ret += copyRecursive(fromPath, path);
if (ret) {
result = MTP_RESPONSE_GENERAL_ERROR;
}
@@ -1158,6 +1159,8 @@
}
mDatabase->endSendObject(path, handle, format, result);
+ if (format == MTP_FORMAT_ASSOCIATION)
+ mDatabase->doScanDirectory(path);
mResponse.setParameter(1, handle);
return result;
}
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 11dedbb..6b20bca 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -52,6 +52,7 @@
enum {
kWhatActivityNotify,
+ kWhatAsyncNotify,
kWhatRequestActivityNotifications,
kWhatStopActivityNotifications,
};
@@ -88,6 +89,11 @@
bool mRequestedActivityNotification;
OnCodecEvent mCallback;
void *mCallbackUserData;
+
+ sp<AMessage> mAsyncNotify;
+ mutable Mutex mAsyncCallbackLock;
+ AMediaCodecOnAsyncNotifyCallback mAsyncCallback;
+ void *mAsyncCallbackUserData;
};
CodecHandler::CodecHandler(AMediaCodec *codec) {
@@ -128,6 +134,147 @@
break;
}
+ case kWhatAsyncNotify:
+ {
+ int32_t cbID;
+ if (!msg->findInt32("callbackID", &cbID)) {
+ ALOGE("kWhatAsyncNotify: callbackID is expected.");
+ break;
+ }
+
+ ALOGV("kWhatAsyncNotify: cbID = %d", cbID);
+
+ switch (cbID) {
+ case MediaCodec::CB_INPUT_AVAILABLE:
+ {
+ int32_t index;
+ if (!msg->findInt32("index", &index)) {
+ ALOGE("CB_INPUT_AVAILABLE: index is expected.");
+ break;
+ }
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallbackUserData != NULL
+ || mCodec->mAsyncCallback.onAsyncInputAvailable != NULL) {
+ mCodec->mAsyncCallback.onAsyncInputAvailable(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ index);
+ }
+
+ break;
+ }
+
+ case MediaCodec::CB_OUTPUT_AVAILABLE:
+ {
+ int32_t index;
+ size_t offset;
+ size_t size;
+ int64_t timeUs;
+ int32_t flags;
+
+ if (!msg->findInt32("index", &index)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: index is expected.");
+ break;
+ }
+ if (!msg->findSize("offset", &offset)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: offset is expected.");
+ break;
+ }
+ if (!msg->findSize("size", &size)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: size is expected.");
+ break;
+ }
+ if (!msg->findInt64("timeUs", &timeUs)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: timeUs is expected.");
+ break;
+ }
+ if (!msg->findInt32("flags", &flags)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: flags is expected.");
+ break;
+ }
+
+ AMediaCodecBufferInfo bufferInfo = {
+ (int32_t)offset,
+ (int32_t)size,
+ timeUs,
+ (uint32_t)flags};
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallbackUserData != NULL
+ || mCodec->mAsyncCallback.onAsyncOutputAvailable != NULL) {
+ mCodec->mAsyncCallback.onAsyncOutputAvailable(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ index,
+ &bufferInfo);
+ }
+
+ break;
+ }
+
+ case MediaCodec::CB_OUTPUT_FORMAT_CHANGED:
+ {
+ sp<AMessage> format;
+ if (!msg->findMessage("format", &format)) {
+ ALOGE("CB_OUTPUT_FORMAT_CHANGED: format is expected.");
+ break;
+ }
+
+ AMediaFormat *aMediaFormat = AMediaFormat_fromMsg(&format);
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallbackUserData != NULL
+ || mCodec->mAsyncCallback.onAsyncFormatChanged != NULL) {
+ mCodec->mAsyncCallback.onAsyncFormatChanged(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ aMediaFormat);
+ }
+
+ break;
+ }
+
+ case MediaCodec::CB_ERROR:
+ {
+ status_t err;
+ int32_t actionCode;
+ AString detail;
+ if (!msg->findInt32("err", &err)) {
+ ALOGE("CB_ERROR: err is expected.");
+ break;
+ }
+ if (!msg->findInt32("action", &actionCode)) {
+ ALOGE("CB_ERROR: action is expected.");
+ break;
+ }
+ msg->findString("detail", &detail);
+ ALOGE("Decoder reported error(0x%x), actionCode(%d), detail(%s)",
+ err, actionCode, detail.c_str());
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallbackUserData != NULL
+ || mCodec->mAsyncCallback.onAsyncError != NULL) {
+ mCodec->mAsyncCallback.onAsyncError(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ translate_error(err),
+ actionCode,
+ detail.c_str());
+ }
+
+ break;
+ }
+
+ default:
+ {
+ ALOGE("kWhatAsyncNotify: callbackID(%d) is unexpected.", cbID);
+ break;
+ }
+ }
+ break;
+ }
+
case kWhatStopActivityNotifications:
{
sp<AReplyToken> replyID;
@@ -162,7 +309,7 @@
size_t res = mData->mLooper->start(
false, // runOnCallingThread
true, // canCallJava XXX
- PRIORITY_FOREGROUND);
+ PRIORITY_AUDIO);
if (res != OK) {
ALOGE("Failed to start the looper");
AMediaCodec_delete(mData);
@@ -183,6 +330,9 @@
mData->mRequestedActivityNotification = false;
mData->mCallback = NULL;
+ mData->mAsyncCallback = {};
+ mData->mAsyncCallbackUserData = NULL;
+
return mData;
}
@@ -222,6 +372,32 @@
}
EXPORT
+media_status_t AMediaCodec_getName(
+ AMediaCodec *mData,
+ char** out_name) {
+ if (out_name == NULL) {
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ AString compName;
+ status_t err = mData->mCodec->getName(&compName);
+ if (err != OK) {
+ return translate_error(err);
+ }
+ *out_name = strdup(compName.c_str());
+ return AMEDIA_OK;
+}
+
+EXPORT
+void AMediaCodec_releaseName(
+ AMediaCodec * /* mData */,
+ char* name) {
+ if (name != NULL) {
+ free(name);
+ }
+}
+
+EXPORT
media_status_t AMediaCodec_configure(
AMediaCodec *mData,
const AMediaFormat* format,
@@ -236,8 +412,40 @@
surface = (Surface*) window;
}
- return translate_error(mData->mCodec->configure(nativeFormat, surface,
- crypto ? crypto->mCrypto : NULL, flags));
+ status_t err = mData->mCodec->configure(nativeFormat, surface,
+ crypto ? crypto->mCrypto : NULL, flags);
+ if (err != OK) {
+ ALOGE("configure: err(%d), failed with format: %s",
+ err, nativeFormat->debugString(0).c_str());
+ }
+ return translate_error(err);
+}
+
+EXPORT
+media_status_t AMediaCodec_setAsyncNotifyCallback(
+ AMediaCodec *mData,
+ AMediaCodecOnAsyncNotifyCallback callback,
+ void *userdata) {
+ if (mData->mAsyncNotify == NULL && userdata != NULL) {
+ mData->mAsyncNotify = new AMessage(kWhatAsyncNotify, mData->mHandler);
+ status_t err = mData->mCodec->setCallback(mData->mAsyncNotify);
+ if (err != OK) {
+ ALOGE("setAsyncNotifyCallback: err(%d), failed to set async callback", err);
+ return translate_error(err);
+ }
+ }
+
+ Mutex::Autolock _l(mData->mAsyncCallbackLock);
+ mData->mAsyncCallback = callback;
+ mData->mAsyncCallbackUserData = userdata;
+
+ return AMEDIA_OK;
+}
+
+
+EXPORT
+media_status_t AMediaCodec_releaseCrypto(AMediaCodec *mData) {
+ return translate_error(mData->mCodec->releaseCrypto());
}
EXPORT
@@ -282,6 +490,19 @@
EXPORT
uint8_t* AMediaCodec_getInputBuffer(AMediaCodec *mData, size_t idx, size_t *out_size) {
+ if (mData->mAsyncNotify != NULL) {
+ // Asynchronous mode
+ sp<MediaCodecBuffer> abuf;
+ if (mData->mCodec->getInputBuffer(idx, &abuf) != 0) {
+ return NULL;
+ }
+
+ if (out_size != NULL) {
+ *out_size = abuf->capacity();
+ }
+ return abuf->data();
+ }
+
android::Vector<android::sp<android::MediaCodecBuffer> > abufs;
if (mData->mCodec->getInputBuffers(&abufs) == 0) {
size_t n = abufs.size();
@@ -304,6 +525,19 @@
EXPORT
uint8_t* AMediaCodec_getOutputBuffer(AMediaCodec *mData, size_t idx, size_t *out_size) {
+ if (mData->mAsyncNotify != NULL) {
+ // Asynchronous mode
+ sp<MediaCodecBuffer> abuf;
+ if (mData->mCodec->getOutputBuffer(idx, &abuf) != 0) {
+ return NULL;
+ }
+
+ if (out_size != NULL) {
+ *out_size = abuf->capacity();
+ }
+ return abuf->data();
+ }
+
android::Vector<android::sp<android::MediaCodecBuffer> > abufs;
if (mData->mCodec->getOutputBuffers(&abufs) == 0) {
size_t n = abufs.size();
@@ -367,6 +601,13 @@
}
EXPORT
+AMediaFormat* AMediaCodec_getInputFormat(AMediaCodec *mData) {
+ sp<AMessage> format;
+ mData->mCodec->getInputFormat(&format);
+ return AMediaFormat_fromMsg(&format);
+}
+
+EXPORT
AMediaFormat* AMediaCodec_getBufferFormat(AMediaCodec *mData, size_t index) {
sp<AMessage> format;
mData->mCodec->getOutputFormat(index, &format);
@@ -542,6 +783,16 @@
return translate_error(err);
}
+EXPORT
+bool AMediaCodecActionCode_isRecoverable(int32_t actionCode) {
+ return (actionCode == ACTION_CODE_RECOVERABLE);
+}
+
+EXPORT
+bool AMediaCodecActionCode_isTransient(int32_t actionCode) {
+ return (actionCode == ACTION_CODE_TRANSIENT);
+}
+
EXPORT
void AMediaCodecCryptoInfo_setPattern(AMediaCodecCryptoInfo *info,
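
AMediaCodec_getName() above returns a strdup()'d component name that the caller must hand back through AMediaCodec_releaseName(). A minimal usage sketch (error handling trimmed):

    #include <cstdio>
    #include <media/NdkMediaCodec.h>

    // Print which component createDecoderByType() actually selected.
    static void logDecoderName() {
        AMediaCodec *codec = AMediaCodec_createDecoderByType("video/avc");
        if (codec == nullptr) return;
        char *name = nullptr;
        if (AMediaCodec_getName(codec, &name) == AMEDIA_OK) {
            printf("decoder component: %s\n", name);
            AMediaCodec_releaseName(codec, name);  // frees the strdup()'d copy
        }
        AMediaCodec_delete(codec);
    }
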
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index ee27520..a9025c0 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -125,6 +125,14 @@
ret.appendFormat("double(%f)", val);
break;
}
+ case AMessage::kTypeRect:
+ {
+ int32_t left, top, right, bottom;
+ f->findRect(name, &left, &top, &right, &bottom);
+ ret.appendFormat("Rect(%" PRId32 ", %" PRId32 ", %" PRId32 ", %" PRId32 ")",
+ left, top, right, bottom);
+ break;
+ }
case AMessage::kTypeString:
{
AString val;
@@ -165,11 +173,22 @@
}
EXPORT
+bool AMediaFormat_getDouble(AMediaFormat* format, const char *name, double *out) {
+ return format->mFormat->findDouble(name, out);
+}
+
+EXPORT
bool AMediaFormat_getSize(AMediaFormat* format, const char *name, size_t *out) {
return format->mFormat->findSize(name, out);
}
EXPORT
+bool AMediaFormat_getRect(AMediaFormat* format, const char *name,
+ int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) {
+ return format->mFormat->findRect(name, left, top, right, bottom);
+}
+
+EXPORT
bool AMediaFormat_getBuffer(AMediaFormat* format, const char *name, void** data, size_t *outsize) {
sp<ABuffer> buf;
if (format->mFormat->findBuffer(name, &buf)) {
@@ -216,6 +235,22 @@
}
EXPORT
+void AMediaFormat_setDouble(AMediaFormat* format, const char* name, double value) {
+ format->mFormat->setDouble(name, value);
+}
+
+EXPORT
+void AMediaFormat_setSize(AMediaFormat* format, const char* name, size_t value) {
+ format->mFormat->setSize(name, value);
+}
+
+EXPORT
+void AMediaFormat_setRect(AMediaFormat* format, const char *name,
+ int32_t left, int32_t top, int32_t right, int32_t bottom) {
+ format->mFormat->setRect(name, left, top, right, bottom);
+}
+
+EXPORT
void AMediaFormat_setString(AMediaFormat* format, const char* name, const char* value) {
// AMessage::setString() makes a copy of the string
format->mFormat->setString(name, value, strlen(value));
@@ -233,30 +268,61 @@
}
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR = "aac-drc-cut-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR = "aac-drc-boost-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION = "aac-drc-heavy-compression";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL = "aac-target-ref-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL = "aac-encoded-target-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT = "aac-max-output-channel_count";
EXPORT const char* AMEDIAFORMAT_KEY_AAC_PROFILE = "aac-profile";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE = "aac-sbr-mode";
+EXPORT const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID = "audio-session-id";
+EXPORT const char* AMEDIAFORMAT_KEY_BITRATE_MODE = "bitrate-mode";
EXPORT const char* AMEDIAFORMAT_KEY_BIT_RATE = "bitrate";
+EXPORT const char* AMEDIAFORMAT_KEY_CAPTURE_RATE = "capture-rate";
EXPORT const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT = "channel-count";
EXPORT const char* AMEDIAFORMAT_KEY_CHANNEL_MASK = "channel-mask";
EXPORT const char* AMEDIAFORMAT_KEY_COLOR_FORMAT = "color-format";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_RANGE = "color-range";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_STANDARD = "color-standard";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER = "color-transfer";
+EXPORT const char* AMEDIAFORMAT_KEY_COMPLEXITY = "complexity";
+EXPORT const char* AMEDIAFORMAT_KEY_DISPLAY_CROP = "crop";
EXPORT const char* AMEDIAFORMAT_KEY_DURATION = "durationUs";
EXPORT const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL = "flac-compression-level";
EXPORT const char* AMEDIAFORMAT_KEY_FRAME_RATE = "frame-rate";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_COLS = "grid-cols";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_HEIGHT = "grid-height";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_ROWS = "grid-rows";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_WIDTH = "grid-width";
+EXPORT const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO = "hdr-static-info";
EXPORT const char* AMEDIAFORMAT_KEY_HEIGHT = "height";
+EXPORT const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD = "intra-refresh-period";
EXPORT const char* AMEDIAFORMAT_KEY_IS_ADTS = "is-adts";
EXPORT const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT = "is-autoselect";
EXPORT const char* AMEDIAFORMAT_KEY_IS_DEFAULT = "is-default";
EXPORT const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE = "is-forced-subtitle";
EXPORT const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL = "i-frame-interval";
EXPORT const char* AMEDIAFORMAT_KEY_LANGUAGE = "language";
+EXPORT const char* AMEDIAFORMAT_KEY_LATENCY = "latency";
+EXPORT const char* AMEDIAFORMAT_KEY_LEVEL = "level";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_HEIGHT = "max-height";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE = "max-input-size";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_WIDTH = "max-width";
EXPORT const char* AMEDIAFORMAT_KEY_MIME = "mime";
+EXPORT const char* AMEDIAFORMAT_KEY_OPERATING_RATE = "operating-rate";
+EXPORT const char* AMEDIAFORMAT_KEY_PCM_ENCODING = "pcm-encoding";
+EXPORT const char* AMEDIAFORMAT_KEY_PRIORITY = "priority";
+EXPORT const char* AMEDIAFORMAT_KEY_PROFILE = "profile";
EXPORT const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP = "push-blank-buffers-on-shutdown";
EXPORT const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER = "repeat-previous-frame-after";
+EXPORT const char* AMEDIAFORMAT_KEY_ROTATION = "rotation-degrees";
EXPORT const char* AMEDIAFORMAT_KEY_SAMPLE_RATE = "sample-rate";
-EXPORT const char* AMEDIAFORMAT_KEY_WIDTH = "width";
+EXPORT const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT = "slice-height";
EXPORT const char* AMEDIAFORMAT_KEY_STRIDE = "stride";
+EXPORT const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING = "ts-schema";
+EXPORT const char* AMEDIAFORMAT_KEY_TRACK_ID = "track-id";
+EXPORT const char* AMEDIAFORMAT_KEY_WIDTH = "width";
} // extern "C"
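
The new rect accessors make crop information reachable from the NDK. A short sketch that reads the output crop, assuming the codec publishes one under AMEDIAFORMAT_KEY_DISPLAY_CROP and that the bounds are inclusive, matching the SDK convention:

    #include <media/NdkMediaCodec.h>
    #include <media/NdkMediaFormat.h>

    // Visible output size: prefer the crop rect, fall back to the full buffer size.
    static void getVisibleSize(AMediaCodec *codec, int32_t *w, int32_t *h) {
        AMediaFormat *fmt = AMediaCodec_getOutputFormat(codec);
        int32_t left, top, right, bottom;
        if (AMediaFormat_getRect(fmt, AMEDIAFORMAT_KEY_DISPLAY_CROP,
                                 &left, &top, &right, &bottom)) {
            *w = right - left + 1;   // right/bottom treated as inclusive
            *h = bottom - top + 1;
        } else {
            AMediaFormat_getInt32(fmt, AMEDIAFORMAT_KEY_WIDTH, w);
            AMediaFormat_getInt32(fmt, AMEDIAFORMAT_KEY_HEIGHT, h);
        }
        AMediaFormat_delete(fmt);
    }
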
diff --git a/media/ndk/include/media/NdkMediaCodec.h b/media/ndk/include/media/NdkMediaCodec.h
index b15de38..f4a51d0 100644
--- a/media/ndk/include/media/NdkMediaCodec.h
+++ b/media/ndk/include/media/NdkMediaCodec.h
@@ -53,11 +53,63 @@
typedef struct AMediaCodecCryptoInfo AMediaCodecCryptoInfo;
enum {
+ AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG = 2,
AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM = 4,
+ AMEDIACODEC_BUFFER_FLAG_PARTIAL_FRAME = 8,
+
AMEDIACODEC_CONFIGURE_FLAG_ENCODE = 1,
AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED = -3,
AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED = -2,
- AMEDIACODEC_INFO_TRY_AGAIN_LATER = -1
+ AMEDIACODEC_INFO_TRY_AGAIN_LATER = -1,
+};
+
+/**
+ * Called when an input buffer becomes available.
+ * The specified index is the index of the available input buffer.
+ */
+typedef void (*AMediaCodecOnAsyncInputAvailable)(
+ AMediaCodec *codec,
+ void *userdata,
+ int32_t index);
+/**
+ * Called when an output buffer becomes available.
+ * The specified index is the index of the available output buffer.
+ * The specified bufferInfo contains information regarding the available output buffer.
+ */
+typedef void (*AMediaCodecOnAsyncOutputAvailable)(
+ AMediaCodec *codec,
+ void *userdata,
+ int32_t index,
+ AMediaCodecBufferInfo *bufferInfo);
+/**
+ * Called when the output format has changed.
+ * The specified format contains the new output format.
+ */
+typedef void (*AMediaCodecOnAsyncFormatChanged)(
+ AMediaCodec *codec,
+ void *userdata,
+ AMediaFormat *format);
+/**
+ * Called when the MediaCodec encountered an error.
+ * The specified actionCode indicates the possible actions that client can take,
+ * and it can be checked by calling AMediaCodecActionCode_isRecoverable or
+ * AMediaCodecActionCode_isTransient. If both AMediaCodecActionCode_isRecoverable()
+ * and AMediaCodecActionCode_isTransient() return false, then the codec error is fatal
+ * and the codec must be deleted.
+ * The specified detail may contain more detailed messages about this error.
+ */
+typedef void (*AMediaCodecOnAsyncError)(
+ AMediaCodec *codec,
+ void *userdata,
+ media_status_t error,
+ int32_t actionCode,
+ const char *detail);
+
+struct AMediaCodecOnAsyncNotifyCallback {
+ AMediaCodecOnAsyncInputAvailable onAsyncInputAvailable;
+ AMediaCodecOnAsyncOutputAvailable onAsyncOutputAvailable;
+ AMediaCodecOnAsyncFormatChanged onAsyncFormatChanged;
+ AMediaCodecOnAsyncError onAsyncError;
};
#if __ANDROID_API__ >= 21
@@ -289,6 +341,71 @@
#endif /* __ANDROID_API__ >= 26 */
+#if __ANDROID_API__ >= 28
+
+/**
+ * Get the component name. If the codec was created by createDecoderByType
+ * or createEncoderByType, what component is chosen is not known beforehand.
+ * Caller shall call AMediaCodec_releaseName to free the returned pointer.
+ */
+media_status_t AMediaCodec_getName(AMediaCodec*, char** out_name);
+
+/**
+ * Free the memory pointed by name which is returned by AMediaCodec_getName.
+ */
+void AMediaCodec_releaseName(AMediaCodec*, char* name);
+
+/**
+ * Set an asynchronous callback for actionable AMediaCodec events.
+ * When asynchronous callback is enabled, the client should not call
+ * AMediaCodec_getInputBuffers(), AMediaCodec_getOutputBuffers(),
+ * AMediaCodec_dequeueInputBuffer() or AMediaCodec_dequeueOutputBuffer().
+ *
+ * Also, AMediaCodec_flush() behaves differently in asynchronous mode.
+ * After calling AMediaCodec_flush(), you must call AMediaCodec_start() to
+ * "resume" receiving input buffers, even if an input surface was created.
+ *
+ * Refer to the definition of AMediaCodecOnAsyncNotifyCallback for how each
+ * callback function is invoked and which parameters it receives.
+ * The specified userdata is the pointer passed to those callback functions
+ * whenever they are invoked.
+ *
+ * All callbacks are fired on one NDK internal thread.
+ * AMediaCodec_setAsyncNotifyCallback should not be called on the callback thread.
+ * No heavy-duty work should be performed on the callback thread.
+ */
+media_status_t AMediaCodec_setAsyncNotifyCallback(
+ AMediaCodec*,
+ AMediaCodecOnAsyncNotifyCallback callback,
+ void *userdata);
+
+/**
+ * Release the crypto if applicable.
+ */
+media_status_t AMediaCodec_releaseCrypto(AMediaCodec*);
+
+/**
+ * Call this after AMediaCodec_configure() returns successfully to get the input
+ * format accepted by the codec. Do this to determine what optional configuration
+ * parameters were supported by the codec.
+ */
+AMediaFormat* AMediaCodec_getInputFormat(AMediaCodec*);
+
+/**
+ * Returns true if the codec cannot proceed further, but can be recovered by stopping,
+ * configuring, and starting again.
+ */
+bool AMediaCodecActionCode_isRecoverable(int32_t actionCode);
+
+/**
+ * Returns true if the codec error is a transient issue, perhaps due to
+ * resource constraints, and that the method (or encoding/decoding) may be
+ * retried at a later time.
+ */
+bool AMediaCodecActionCode_isTransient(int32_t actionCode);
+
+#endif /* __ANDROID_API__ >= 28 */
+
typedef enum {
AMEDIACODECRYPTOINFO_MODE_CLEAR = 0,
AMEDIACODECRYPTOINFO_MODE_AES_CTR = 1,
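
A minimal sketch of wiring up the asynchronous callback struct documented above; the handler bodies are placeholders and DecoderCtx is an illustrative client-side type:

    #include <media/NdkMediaCodec.h>

    struct DecoderCtx { /* client state shared with the callbacks */ };

    static void onInput(AMediaCodec *codec, void *userdata, int32_t index) {
        // fill the input buffer at `index`, then AMediaCodec_queueInputBuffer(...)
    }
    static void onOutput(AMediaCodec *codec, void *userdata, int32_t index,
                         AMediaCodecBufferInfo *info) {
        // consume the output, then AMediaCodec_releaseOutputBuffer(codec, index, false)
    }
    static void onFormat(AMediaCodec *codec, void *userdata, AMediaFormat *format) {
        // react to the new output format
    }
    static void onError(AMediaCodec *codec, void *userdata, media_status_t error,
                        int32_t actionCode, const char *detail) {
        if (!AMediaCodecActionCode_isRecoverable(actionCode) &&
            !AMediaCodecActionCode_isTransient(actionCode)) {
            // fatal, e.g. AMEDIACODEC_ERROR_RECLAIMED: the codec must be released
        }
    }

    static void setupAsync(AMediaCodec *codec, DecoderCtx *ctx) {
        AMediaCodecOnAsyncNotifyCallback cb = { onInput, onOutput, onFormat, onError };
        AMediaCodec_setAsyncNotifyCallback(codec, cb, ctx);
        // configure and start as usual; buffers are then delivered through the
        // callbacks rather than the dequeue calls.
    }
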
diff --git a/media/ndk/include/media/NdkMediaError.h b/media/ndk/include/media/NdkMediaError.h
index da61b64..e48fcbe 100644
--- a/media/ndk/include/media/NdkMediaError.h
+++ b/media/ndk/include/media/NdkMediaError.h
@@ -35,6 +35,17 @@
typedef enum {
AMEDIA_OK = 0,
+ /**
+ * This indicates that a required resource could not be allocated.
+ */
+ AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE = 1100,
+
+ /**
+ * This indicates the resource manager reclaimed the media resource used by the codec.
+ * With this error, the codec must be released, as it has moved to terminal state.
+ */
+ AMEDIACODEC_ERROR_RECLAIMED = 1101,
+
AMEDIA_ERROR_BASE = -10000,
AMEDIA_ERROR_UNKNOWN = AMEDIA_ERROR_BASE,
AMEDIA_ERROR_MALFORMED = AMEDIA_ERROR_BASE - 1,
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 018ab76..b6489c7 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -51,6 +51,7 @@
bool AMediaFormat_getInt32(AMediaFormat*, const char *name, int32_t *out);
bool AMediaFormat_getInt64(AMediaFormat*, const char *name, int64_t *out);
bool AMediaFormat_getFloat(AMediaFormat*, const char *name, float *out);
+bool AMediaFormat_getSize(AMediaFormat*, const char *name, size_t *out);
/**
* The returned data is owned by the format and remains valid as long as the named entry
* is part of the format.
@@ -80,33 +81,75 @@
/**
* XXX should these be ints/enums that we look up in a table as needed?
*/
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL;
+extern const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL;
+extern const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT;
extern const char* AMEDIAFORMAT_KEY_AAC_PROFILE;
+extern const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE;
+extern const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID;
+extern const char* AMEDIAFORMAT_KEY_BITRATE_MODE;
extern const char* AMEDIAFORMAT_KEY_BIT_RATE;
+extern const char* AMEDIAFORMAT_KEY_CAPTURE_RATE;
extern const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT;
extern const char* AMEDIAFORMAT_KEY_CHANNEL_MASK;
extern const char* AMEDIAFORMAT_KEY_COLOR_FORMAT;
+extern const char* AMEDIAFORMAT_KEY_COLOR_RANGE;
+extern const char* AMEDIAFORMAT_KEY_COLOR_STANDARD;
+extern const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER;
+extern const char* AMEDIAFORMAT_KEY_COMPLEXITY;
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_CROP;
extern const char* AMEDIAFORMAT_KEY_DURATION;
extern const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL;
extern const char* AMEDIAFORMAT_KEY_FRAME_RATE;
+extern const char* AMEDIAFORMAT_KEY_GRID_COLS;
+extern const char* AMEDIAFORMAT_KEY_GRID_HEIGHT;
+extern const char* AMEDIAFORMAT_KEY_GRID_ROWS;
+extern const char* AMEDIAFORMAT_KEY_GRID_WIDTH;
+extern const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO;
extern const char* AMEDIAFORMAT_KEY_HEIGHT;
+extern const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD;
extern const char* AMEDIAFORMAT_KEY_IS_ADTS;
extern const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT;
extern const char* AMEDIAFORMAT_KEY_IS_DEFAULT;
extern const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE;
extern const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL;
extern const char* AMEDIAFORMAT_KEY_LANGUAGE;
+extern const char* AMEDIAFORMAT_KEY_LATENCY;
+extern const char* AMEDIAFORMAT_KEY_LEVEL;
extern const char* AMEDIAFORMAT_KEY_MAX_HEIGHT;
extern const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE;
extern const char* AMEDIAFORMAT_KEY_MAX_WIDTH;
extern const char* AMEDIAFORMAT_KEY_MIME;
+extern const char* AMEDIAFORMAT_KEY_OPERATING_RATE;
+extern const char* AMEDIAFORMAT_KEY_PCM_ENCODING;
+extern const char* AMEDIAFORMAT_KEY_PRIORITY;
+extern const char* AMEDIAFORMAT_KEY_PROFILE;
extern const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP;
extern const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER;
+extern const char* AMEDIAFORMAT_KEY_ROTATION;
extern const char* AMEDIAFORMAT_KEY_SAMPLE_RATE;
-extern const char* AMEDIAFORMAT_KEY_WIDTH;
+extern const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT;
extern const char* AMEDIAFORMAT_KEY_STRIDE;
+extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING;
+extern const char* AMEDIAFORMAT_KEY_TRACK_ID;
+extern const char* AMEDIAFORMAT_KEY_WIDTH;
#endif /* __ANDROID_API__ >= 21 */
+#if __ANDROID_API__ >= 28
+bool AMediaFormat_getDouble(AMediaFormat*, const char *name, double *out);
+bool AMediaFormat_getRect(AMediaFormat*, const char *name,
+ int32_t *left, int32_t *top, int32_t *right, int32_t *bottom);
+
+void AMediaFormat_setDouble(AMediaFormat*, const char* name, double value);
+void AMediaFormat_setSize(AMediaFormat*, const char* name, size_t value);
+void AMediaFormat_setRect(AMediaFormat*, const char* name,
+ int32_t left, int32_t top, int32_t right, int32_t bottom);
+#endif /* __ANDROID_API__ >= 28 */
+
__END_DECLS
#endif // _NDK_MEDIA_FORMAT_H
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index d7ad370..f2d97cd 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -26,30 +26,63 @@
AImage_getPlaneRowStride; # introduced=24
AImage_getTimestamp; # introduced=24
AImage_getWidth; # introduced=24
+ AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT; # var introduced=28
AMEDIAFORMAT_KEY_AAC_PROFILE; # var
+ AMEDIAFORMAT_KEY_AAC_SBR_MODE; # var introduced=28
+ AMEDIAFORMAT_KEY_AUDIO_SESSION_ID; # var introduced=28
+ AMEDIAFORMAT_KEY_BITRATE_MODE; # var introduced=28
AMEDIAFORMAT_KEY_BIT_RATE; # var
+ AMEDIAFORMAT_KEY_CAPTURE_RATE; # var introduced=28
AMEDIAFORMAT_KEY_CHANNEL_COUNT; # var
AMEDIAFORMAT_KEY_CHANNEL_MASK; # var
AMEDIAFORMAT_KEY_COLOR_FORMAT; # var
+ AMEDIAFORMAT_KEY_COLOR_RANGE; # var introduced=28
+ AMEDIAFORMAT_KEY_COLOR_STANDARD; # var introduced=28
+ AMEDIAFORMAT_KEY_COLOR_TRANSFER; # var introduced=28
+ AMEDIAFORMAT_KEY_COMPLEXITY; # var introduced=28
+ AMEDIAFORMAT_KEY_DISPLAY_CROP; # var introduced=28
AMEDIAFORMAT_KEY_DURATION; # var
AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var
AMEDIAFORMAT_KEY_FRAME_RATE; # var
+ AMEDIAFORMAT_KEY_GRID_COLS; # var introduced=28
+ AMEDIAFORMAT_KEY_GRID_HEIGHT; # var introduced=28
+ AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
+ AMEDIAFORMAT_KEY_GRID_WIDTH; # var introduced=28
+ AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
AMEDIAFORMAT_KEY_HEIGHT; # var
+ AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD; # var introduced=28
AMEDIAFORMAT_KEY_IS_ADTS; # var
AMEDIAFORMAT_KEY_IS_AUTOSELECT; # var
AMEDIAFORMAT_KEY_IS_DEFAULT; # var
AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE; # var
AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var
AMEDIAFORMAT_KEY_LANGUAGE; # var
+ AMEDIAFORMAT_KEY_LATENCY; # var introduced=28
+ AMEDIAFORMAT_KEY_LEVEL; # var introduced=28
AMEDIAFORMAT_KEY_MAX_HEIGHT; # var
AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var
AMEDIAFORMAT_KEY_MAX_WIDTH; # var
AMEDIAFORMAT_KEY_MIME; # var
+ AMEDIAFORMAT_KEY_OPERATING_RATE; # var introduced=28
+ AMEDIAFORMAT_KEY_PCM_ENCODING; # var introduced=28
+ AMEDIAFORMAT_KEY_PRIORITY; # var introduced=28
+ AMEDIAFORMAT_KEY_PROFILE; # var introduced=28
AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var
AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var
+ AMEDIAFORMAT_KEY_ROTATION; # var introduced=28
AMEDIAFORMAT_KEY_SAMPLE_RATE; # var
+ AMEDIAFORMAT_KEY_SLICE_HEIGHT; # var introduced=28
AMEDIAFORMAT_KEY_STRIDE; # var
+ AMEDIAFORMAT_KEY_TEMPORAL_LAYERING; # var introduced=28
+ AMEDIAFORMAT_KEY_TRACK_ID; # var introduced=28
AMEDIAFORMAT_KEY_WIDTH; # var
+ AMediaCodecActionCode_isRecoverable; # introduced=28
+ AMediaCodecActionCode_isTransient; # introduced=28
AMediaCodecCryptoInfo_delete;
AMediaCodecCryptoInfo_getClearBytes;
AMediaCodecCryptoInfo_getEncryptedBytes;
@@ -68,12 +101,16 @@
AMediaCodec_dequeueOutputBuffer;
AMediaCodec_flush;
AMediaCodec_getInputBuffer;
+ AMediaCodec_getInputFormat; # introduced=28
+ AMediaCodec_getName; # introduced=28
AMediaCodec_getOutputBuffer;
AMediaCodec_getOutputFormat;
AMediaCodec_queueInputBuffer;
AMediaCodec_queueSecureInputBuffer;
+ AMediaCodec_releaseCrypto; # introduced=28
AMediaCodec_releaseOutputBuffer;
AMediaCodec_releaseOutputBufferAtTime;
+ AMediaCodec_setAsyncNotifyCallback; # introduced=28
AMediaCodec_setOutputSurface; # introduced=24
AMediaCodec_setParameters; # introduced=26
AMediaCodec_setInputSurface; # introduced=26
@@ -127,16 +164,21 @@
AMediaExtractor_unselectTrack;
AMediaFormat_delete;
AMediaFormat_getBuffer;
+ AMediaFormat_getDouble; # introduced=28
AMediaFormat_getFloat;
AMediaFormat_getInt32;
AMediaFormat_getInt64;
+ AMediaFormat_getRect; # introduced=28
AMediaFormat_getSize;
AMediaFormat_getString;
AMediaFormat_new;
AMediaFormat_setBuffer;
+ AMediaFormat_setDouble; # introduced=28
AMediaFormat_setFloat;
AMediaFormat_setInt32;
AMediaFormat_setInt64;
+ AMediaFormat_setRect; # introduced=28
+ AMediaFormat_setSize; # introduced=28
AMediaFormat_setString;
AMediaFormat_toString;
AMediaMuxer_addTrack;
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index e0d0d7b..bfb0fe2 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -296,6 +296,43 @@
const bool auxType =
(mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY;
+ // safeInputOutputSampleCount is 0 if the channel counts of the input and output
+ // buffers do not match. This prevents automatic accumulation or copying between the
+ // input and output effect buffers without an intermediary effect process.
+ // TODO: consider implementing channel conversion.
+ const size_t safeInputOutputSampleCount =
+ inChannelCount != outChannelCount ? 0
+ : outChannelCount * std::min(
+ mConfig.inputCfg.buffer.frameCount,
+ mConfig.outputCfg.buffer.frameCount);
+ const auto accumulateInputToOutput = [this, safeInputOutputSampleCount]() {
+#ifdef FLOAT_EFFECT_CHAIN
+ accumulate_float(
+ mConfig.outputCfg.buffer.f32,
+ mConfig.inputCfg.buffer.f32,
+ safeInputOutputSampleCount);
+#else
+ accumulate_i16(
+ mConfig.outputCfg.buffer.s16,
+ mConfig.inputCfg.buffer.s16,
+ safeInputOutputSampleCount);
+#endif
+ };
+ const auto copyInputToOutput = [this, safeInputOutputSampleCount]() {
+#ifdef FLOAT_EFFECT_CHAIN
+ memcpy(
+ mConfig.outputCfg.buffer.f32,
+ mConfig.inputCfg.buffer.f32,
+ safeInputOutputSampleCount * sizeof(*mConfig.outputCfg.buffer.f32));
+
+#else
+ memcpy(
+ mConfig.outputCfg.buffer.s16,
+ mConfig.inputCfg.buffer.s16,
+ safeInputOutputSampleCount * sizeof(*mConfig.outputCfg.buffer.s16));
+#endif
+ };
+
if (isProcessEnabled()) {
int ret;
if (isProcessImplemented()) {
@@ -308,97 +345,69 @@
static_assert(sizeof(float) <= sizeof(int32_t),
"in-place conversion requires sizeof(float) <= sizeof(int32_t)");
- const int32_t * const p32 = mConfig.inputCfg.buffer.s32;
- float * const pFloat = mConfig.inputCfg.buffer.f32;
- memcpy_to_float_from_q4_27(pFloat, p32, mConfig.inputCfg.buffer.frameCount);
- } else {
- memcpy_to_i16_from_q4_27(mConfig.inputCfg.buffer.s16,
+ memcpy_to_float_from_q4_27(
+ mConfig.inputCfg.buffer.f32,
+ mConfig.inputCfg.buffer.s32,
+ mConfig.inputCfg.buffer.frameCount);
+ } else
+#endif
+ {
+ memcpy_to_i16_from_q4_27(
+ mConfig.inputCfg.buffer.s16,
mConfig.inputCfg.buffer.s32,
mConfig.inputCfg.buffer.frameCount);
}
-#else
- memcpy_to_i16_from_q4_27(mConfig.inputCfg.buffer.s16,
- mConfig.inputCfg.buffer.s32,
- mConfig.inputCfg.buffer.frameCount);
-#endif
}
#ifdef FLOAT_EFFECT_CHAIN
- if (mSupportsFloat) {
- ret = mEffectInterface->process();
- } else {
- { // convert input to int16_t as effect doesn't support float.
- if (!auxType) {
- if (mInBuffer16.get() == nullptr) {
- ALOGW("%s: mInBuffer16 is null, bypassing", __func__);
- goto data_bypass;
- }
- const float * const pIn = mInBuffer->audioBuffer()->f32;
- int16_t * const pIn16 = mInBuffer16->audioBuffer()->s16;
- memcpy_to_i16_from_float(
- pIn16, pIn, inChannelCount * mConfig.inputCfg.buffer.frameCount);
+ if (!mSupportsFloat) { // convert input to int16_t as effect doesn't support float.
+ if (!auxType) {
+ if (mInConversionBuffer.get() == nullptr) {
+ ALOGW("%s: mInConversionBuffer is null, bypassing", __func__);
+ goto data_bypass;
}
- if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
- if (mOutBuffer16.get() == nullptr) {
- ALOGW("%s: mOutBuffer16 is null, bypassing", __func__);
- goto data_bypass;
- }
- int16_t * const pOut16 = mOutBuffer16->audioBuffer()->s16;
- const float * const pOut = mOutBuffer->audioBuffer()->f32;
- memcpy_to_i16_from_float(
- pOut16,
- pOut,
- outChannelCount * mConfig.outputCfg.buffer.frameCount);
- }
+ memcpy_to_i16_from_float(
+ mInConversionBuffer->audioBuffer()->s16,
+ mInBuffer->audioBuffer()->f32,
+ inChannelCount * mConfig.inputCfg.buffer.frameCount);
}
-
- ret = mEffectInterface->process();
-
- { // convert output back to float.
- const int16_t * const pOut16 = mOutBuffer16->audioBuffer()->s16;
- float * const pOut = mOutBuffer->audioBuffer()->f32;
- memcpy_to_float_from_i16(
- pOut, pOut16, outChannelCount * mConfig.outputCfg.buffer.frameCount);
+ if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+ if (mOutConversionBuffer.get() == nullptr) {
+ ALOGW("%s: mOutConversionBuffer is null, bypassing", __func__);
+ goto data_bypass;
+ }
+ memcpy_to_i16_from_float(
+ mOutConversionBuffer->audioBuffer()->s16,
+ mOutBuffer->audioBuffer()->f32,
+ outChannelCount * mConfig.outputCfg.buffer.frameCount);
}
}
-#else
+#endif
+
ret = mEffectInterface->process();
+
+#ifdef FLOAT_EFFECT_CHAIN
+ if (!mSupportsFloat) { // convert output int16_t back to float.
+ memcpy_to_float_from_i16(
+ mOutBuffer->audioBuffer()->f32,
+ mOutConversionBuffer->audioBuffer()->s16,
+ outChannelCount * mConfig.outputCfg.buffer.frameCount);
+ }
#endif
} else {
#ifdef FLOAT_EFFECT_CHAIN
data_bypass:
#endif
if (!auxType /* aux effects do not require data bypass */
- && mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw
- && inChannelCount == outChannelCount) {
- const size_t sampleCount = std::min(
- mConfig.inputCfg.buffer.frameCount,
- mConfig.outputCfg.buffer.frameCount) * outChannelCount;
-
-#ifdef FLOAT_EFFECT_CHAIN
- const float * const in = mConfig.inputCfg.buffer.f32;
- float * const out = mConfig.outputCfg.buffer.f32;
-
+ && mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
- accumulate_float(out, in, sampleCount);
+ accumulateInputToOutput();
} else {
- memcpy(mConfig.outputCfg.buffer.f32, mConfig.inputCfg.buffer.f32,
- sampleCount * sizeof(*mConfig.outputCfg.buffer.f32));
+ copyInputToOutput();
}
-
-#else
- const int16_t * const in = mConfig.inputCfg.buffer.s16;
- int16_t * const out = mConfig.outputCfg.buffer.s16;
-
- if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
- accumulate_i16(out, in, sampleCount);
- } else {
- memcpy(mConfig.outputCfg.buffer.s16, mConfig.inputCfg.buffer.s16,
- sampleCount * sizeof(*mConfig.outputCfg.buffer.s16));
- }
-#endif
}
ret = -ENODATA;
}
+
// force transition to IDLE state when engine is ready
if (mState == STOPPED && ret == -ENODATA) {
mDisableWaitCnt = 1;
@@ -417,21 +426,8 @@
// If an insert effect is idle and input buffer is different from output buffer,
// accumulate input onto output
sp<EffectChain> chain = mChain.promote();
- if (chain != 0
- && chain->activeTrackCnt() != 0
- && inChannelCount == outChannelCount) {
- const size_t sampleCount = std::min(
- mConfig.inputCfg.buffer.frameCount,
- mConfig.outputCfg.buffer.frameCount) * outChannelCount;
-#ifdef FLOAT_EFFECT_CHAIN
- const float * const in = mConfig.inputCfg.buffer.f32;
- float * const out = mConfig.outputCfg.buffer.f32;
- accumulate_float(out, in, sampleCount);
-#else
- const int16_t * const in = mConfig.inputCfg.buffer.s16;
- int16_t * const out = mConfig.outputCfg.buffer.s16;
- accumulate_i16(out, in, sampleCount);
-#endif
+ if (chain.get() != nullptr && chain->activeTrackCnt() != 0) {
+ accumulateInputToOutput();
}
}
}
@@ -906,7 +902,7 @@
mEffectInterface->setInBuffer(buffer);
#ifdef FLOAT_EFFECT_CHAIN
- // aux effects do in place conversion to float - we don't allocate mInBuffer16 for them.
+ // aux effects do in place conversion to float - we don't allocate mInConversionBuffer.
// Theoretically insert effects can also do in-place conversions (destroying
// the original buffer) when the output buffer is identical to the input buffer,
// but we don't optimize for it here.
@@ -920,17 +916,17 @@
ALOGV("%s: setInBuffer updating for inChannels:%d inFrameCount:%zu total size:%zu",
__func__, inChannels, inFrameCount, size);
- if (size > 0 && (mInBuffer16.get() == nullptr || size > mInBuffer16->getSize())) {
- mInBuffer16.clear();
- ALOGV("%s: allocating mInBuffer16 %zu", __func__, size);
- (void)EffectBufferHalInterface::allocate(size, &mInBuffer16);
+ if (size > 0 && (mInConversionBuffer.get() == nullptr
+ || size > mInConversionBuffer->getSize())) {
+ mInConversionBuffer.clear();
+ ALOGV("%s: allocating mInConversionBuffer %zu", __func__, size);
+ (void)EffectBufferHalInterface::allocate(size, &mInConversionBuffer);
}
- if (mInBuffer16.get() != nullptr) {
- // FIXME: confirm buffer has enough size.
- mInBuffer16->setFrameCount(inFrameCount);
- mEffectInterface->setInBuffer(mInBuffer16);
+ if (mInConversionBuffer.get() != nullptr) {
+ mInConversionBuffer->setFrameCount(inFrameCount);
+ mEffectInterface->setInBuffer(mInConversionBuffer);
} else if (size > 0) {
- ALOGE("%s cannot create mInBuffer16", __func__);
+ ALOGE("%s cannot create mInConversionBuffer", __func__);
}
}
#endif
@@ -948,7 +944,7 @@
mEffectInterface->setOutBuffer(buffer);
#ifdef FLOAT_EFFECT_CHAIN
- // Note: Any effect that does not accumulate does not need mOutBuffer16 and
+ // Note: Any effect that does not accumulate does not need mOutConversionBuffer and
// can do in-place conversion from int16_t to float. We don't optimize here.
if (!mSupportsFloat && mOutBuffer.get() != nullptr) {
const size_t outFrameCount = mConfig.outputCfg.buffer.frameCount;
@@ -958,16 +954,17 @@
ALOGV("%s: setOutBuffer updating for outChannels:%d outFrameCount:%zu total size:%zu",
__func__, outChannels, outFrameCount, size);
- if (size > 0 && (mOutBuffer16.get() == nullptr || size > mOutBuffer16->getSize())) {
- mOutBuffer16.clear();
- ALOGV("%s: allocating mOutBuffer16 %zu", __func__, size);
- (void)EffectBufferHalInterface::allocate(size, &mOutBuffer16);
+ if (size > 0 && (mOutConversionBuffer.get() == nullptr
+ || size > mOutConversionBuffer->getSize())) {
+ mOutConversionBuffer.clear();
+ ALOGV("%s: allocating mOutConversionBuffer %zu", __func__, size);
+ (void)EffectBufferHalInterface::allocate(size, &mOutConversionBuffer);
}
- if (mOutBuffer16.get() != nullptr) {
- mOutBuffer16->setFrameCount(outFrameCount);
- mEffectInterface->setOutBuffer(mOutBuffer16);
+ if (mOutConversionBuffer.get() != nullptr) {
+ mOutConversionBuffer->setFrameCount(outFrameCount);
+ mEffectInterface->setOutBuffer(mOutConversionBuffer);
} else if (size > 0) {
- ALOGE("%s cannot create mOutBuffer16", __func__);
+ ALOGE("%s cannot create mOutConversionBuffer", __func__);
}
}
#endif
@@ -1241,6 +1238,20 @@
return s;
}
+static std::string dumpInOutBuffer(bool isInput, const sp<EffectBufferHalInterface> &buffer) {
+ std::stringstream ss;
+
+ if (buffer.get() == nullptr) {
+ return "nullptr"; // make different than below
+ } else if (buffer->externalData() != nullptr) {
+ ss << (isInput ? buffer->externalData() : buffer->audioBuffer()->raw)
+ << " -> "
+ << (isInput ? buffer->audioBuffer()->raw : buffer->externalData());
+ } else {
+ ss << buffer->audioBuffer()->raw;
+ }
+ return ss.str();
+}
void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args __unused)
{
@@ -1305,19 +1316,13 @@
result.append(buffer);
#ifdef FLOAT_EFFECT_CHAIN
- if (!mSupportsFloat) {
- int16_t* pIn16 = mInBuffer16 != 0 ? mInBuffer16->audioBuffer()->s16 : NULL;
- int16_t* pOut16 = mOutBuffer16 != 0 ? mOutBuffer16->audioBuffer()->s16 : NULL;
- result.append("\t\t- Float and int16 buffers\n");
- result.append("\t\t\tIn_float In_int16 Out_float Out_int16\n");
- snprintf(buffer, SIZE,"\t\t\t%p %p %p %p\n",
- mConfig.inputCfg.buffer.raw,
- pIn16,
- pOut16,
- mConfig.outputCfg.buffer.raw);
- result.append(buffer);
- }
+ result.appendFormat("\t\t- HAL buffers:\n"
+ "\t\t\tIn(%s) InConversion(%s) Out(%s) OutConversion(%s)\n",
+ dumpInOutBuffer(true /* isInput */, mInBuffer).c_str(),
+ dumpInOutBuffer(true /* isInput */, mInConversionBuffer).c_str(),
+ dumpInOutBuffer(false /* isInput */, mOutBuffer).c_str(),
+ dumpInOutBuffer(false /* isInput */, mOutConversionBuffer).c_str());
#endif
snprintf(buffer, SIZE, "\t\t%zu Clients:\n", mHandles.size());
@@ -2161,19 +2166,6 @@
}
}
-static void dumpInOutBuffer(
- char *dump, size_t dumpSize, bool isInput, EffectBufferHalInterface *buffer) {
- if (buffer == nullptr) {
- snprintf(dump, dumpSize, "%p", buffer);
- } else if (buffer->externalData() != nullptr) {
- snprintf(dump, dumpSize, "%p -> %p",
- isInput ? buffer->externalData() : buffer->audioBuffer()->raw,
- isInput ? buffer->audioBuffer()->raw : buffer->externalData());
- } else {
- snprintf(dump, dumpSize, "%p", buffer->audioBuffer()->raw);
- }
-}
-
void AudioFlinger::EffectChain::dump(int fd, const Vector<String16>& args)
{
const size_t SIZE = 256;
@@ -2191,15 +2183,13 @@
result.append("\tCould not lock mutex:\n");
}
- char inBufferStr[64], outBufferStr[64];
- dumpInOutBuffer(inBufferStr, sizeof(inBufferStr), true, mInBuffer.get());
- dumpInOutBuffer(outBufferStr, sizeof(outBufferStr), false, mOutBuffer.get());
- snprintf(buffer, SIZE, "\t%-*s%-*s Active tracks:\n",
- (int)strlen(inBufferStr), "In buffer ",
- (int)strlen(outBufferStr), "Out buffer ");
- result.append(buffer);
- snprintf(buffer, SIZE, "\t%s %s %d\n", inBufferStr, outBufferStr, mActiveTrackCnt);
- result.append(buffer);
+ const std::string inBufferStr = dumpInOutBuffer(true /* isInput */, mInBuffer);
+ const std::string outBufferStr = dumpInOutBuffer(false /* isInput */, mOutBuffer);
+ result.appendFormat("\t%-*s%-*s Active tracks:\n",
+ (int)inBufferStr.size(), "In buffer ",
+ (int)outBufferStr.size(), "Out buffer ");
+ result.appendFormat("\t%s %s %d\n",
+ inBufferStr.c_str(), outBufferStr.c_str(), mActiveTrackCnt);
write(fd, result.string(), result.size());
for (size_t i = 0; i < numEffects; ++i) {
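
The accumulateInputToOutput()/copyInputToOutput() lambdas above route every bypass and idle path through safeInputOutputSampleCount, which is zero whenever the channel counts differ. For reference, a minimal element-wise accumulate of the kind the float path relies on (assuming accumulate_float simply adds input samples into the output, which is how it is used here):

    // Illustrative only: add `count` input samples into the output buffer.
    static inline void accumulate_float(float *out, const float *in, size_t count) {
        for (size_t i = 0; i < count; ++i) {
            out[i] += in[i];
        }
    }
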
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 1864e0f..eea3208 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -171,8 +171,8 @@
#ifdef FLOAT_EFFECT_CHAIN
bool mSupportsFloat; // effect supports float processing
- sp<EffectBufferHalInterface> mInBuffer16; // Buffers for interacting with HAL at 16 bits
- sp<EffectBufferHalInterface> mOutBuffer16;
+ sp<EffectBufferHalInterface> mInConversionBuffer; // Buffers for HAL conversion if needed.
+ sp<EffectBufferHalInterface> mOutConversionBuffer;
#endif
};
diff --git a/services/audioflinger/FastMixerDumpState.cpp b/services/audioflinger/FastMixerDumpState.cpp
index 6475f22..2e4fb8c 100644
--- a/services/audioflinger/FastMixerDumpState.cpp
+++ b/services/audioflinger/FastMixerDumpState.cpp
@@ -78,7 +78,12 @@
uint32_t bounds = mBounds;
uint32_t newestOpen = bounds & 0xFFFF;
uint32_t oldestClosed = bounds >> 16;
- uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
+
+ //uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
+ uint32_t n;
+ __builtin_sub_overflow(newestOpen, oldestClosed, &n);
+ n = n & 0xFFFF;
+
if (n > mSamplingN) {
ALOGE("too many samples %u", n);
n = mSamplingN;
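
The commented-out line keeps the old expression for reference; __builtin_sub_overflow performs the same modular subtraction while making the intentional wraparound explicit, which keeps unsigned-overflow sanitizers quiet. The equivalent computation in isolation:

    #include <cstdint>

    // Wrapped distance between two 16-bit counters stored in 32-bit words.
    static uint32_t wrappedDistance(uint32_t newestOpen, uint32_t oldestClosed) {
        uint32_t n;
        (void)__builtin_sub_overflow(newestOpen, oldestClosed, &n);  // n = newestOpen - oldestClosed (mod 2^32)
        return n & 0xFFFF;  // same value as (newestOpen - oldestClosed) & 0xFFFF
    }
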
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 1445572..cdd8ca0 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1102,11 +1102,12 @@
void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
{
- for (size_t i = 0; i < mSyncEvents.size(); i++) {
+ for (size_t i = 0; i < mSyncEvents.size();) {
if (mSyncEvents[i]->type() == type) {
mSyncEvents[i]->trigger();
mSyncEvents.removeAt(i);
- i--;
+ } else {
+ ++i;
}
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index b169bac..d9cd121 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -34,8 +34,8 @@
class AudioInputDescriptor: public AudioPortConfig, public AudioSessionInfoProvider
{
public:
- explicit AudioInputDescriptor(const sp<IOProfile>& profile);
- void setIoHandle(audio_io_handle_t ioHandle);
+ explicit AudioInputDescriptor(const sp<IOProfile>& profile,
+ AudioPolicyClientInterface *clientInterface);
audio_port_handle_t getId() const;
audio_module_handle_t getModuleHandle() const;
uint32_t getOpenRefCount() const;
@@ -73,6 +73,14 @@
void setPatchHandle(audio_patch_handle_t handle);
+ status_t open(const audio_config_t *config,
+ audio_devices_t device,
+ const String8& address,
+ audio_source_t source,
+ audio_input_flags_t flags,
+ audio_io_handle_t *input);
+ void close();
+
private:
audio_patch_handle_t mPatchHandle;
audio_port_handle_t mId;
@@ -85,6 +93,7 @@
// a particular input started and prevent preemption of this active input by this session.
// We also inherit sessions from the preempted input to avoid a 3 way preemption loop etc...
SortedVector<audio_session_t> mPreemptedSessions;
+ AudioPolicyClientInterface *mClientInterface;
};
class AudioInputCollection :
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index c09cb5a..0be8fc1 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -101,8 +101,6 @@
status_t dump(int fd);
- void setIoHandle(audio_io_handle_t ioHandle);
-
virtual audio_devices_t device() const;
virtual bool sharesHwModuleWith(const sp<AudioOutputDescriptor>& outputDesc);
virtual audio_devices_t supportedDevices();
@@ -122,6 +120,14 @@
const struct audio_port_config *srcConfig = NULL) const;
virtual void toAudioPort(struct audio_port *port) const;
+ status_t open(const audio_config_t *config,
+ audio_devices_t device,
+ const String8& address,
+ audio_stream_type_t stream,
+ audio_output_flags_t flags,
+ audio_io_handle_t *output);
+ void close();
+
const sp<IOProfile> mProfile; // I/O profile this output derives from
audio_io_handle_t mIoHandle; // output handle
uint32_t mLatency; //
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
index 635fe4d..4316307 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
@@ -27,14 +27,12 @@
sp<AudioPort> AudioPortVector::findByTagName(const String8 &tagName) const
{
- sp<AudioPort> port = 0;
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->getTagName() == tagName) {
- port = itemAt(i);
- break;
+ for (const auto& port : *this) {
+ if (port->getTagName() == tagName) {
+ return port;
}
}
- return port;
+ return nullptr;
}
status_t AudioRouteVector::dump(int fd, int spaces) const
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 2492ed6..624e688 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "APM::AudioInputDescriptor"
//#define LOG_NDEBUG 0
+#include <AudioPolicyInterface.h>
#include "AudioInputDescriptor.h"
#include "IOProfile.h"
#include "AudioGain.h"
@@ -26,10 +27,12 @@
namespace android {
-AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile)
+AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile,
+ AudioPolicyClientInterface *clientInterface)
: mIoHandle(0),
mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL),
- mProfile(profile), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0)
+ mProfile(profile), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0),
+ mClientInterface(clientInterface)
{
if (profile != NULL) {
profile->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
@@ -39,12 +42,6 @@
}
}
-void AudioInputDescriptor::setIoHandle(audio_io_handle_t ioHandle)
-{
- mId = AudioPort::getNextUniqueId();
- mIoHandle = ioHandle;
-}
-
audio_module_handle_t AudioInputDescriptor::getModuleHandle() const
{
if (mProfile == 0) {
@@ -192,6 +189,66 @@
return config;
}
+status_t AudioInputDescriptor::open(const audio_config_t *config,
+ audio_devices_t device,
+ const String8& address,
+ audio_source_t source,
+ audio_input_flags_t flags,
+ audio_io_handle_t *input)
+{
+ audio_config_t lConfig;
+ if (config == nullptr) {
+ lConfig = AUDIO_CONFIG_INITIALIZER;
+ lConfig.sample_rate = mSamplingRate;
+ lConfig.channel_mask = mChannelMask;
+ lConfig.format = mFormat;
+ } else {
+ lConfig = *config;
+ }
+
+ String8 lAddress = address;
+ if (lAddress == "") {
+ const DeviceVector& supportedDevices = mProfile->getSupportedDevices();
+ const DeviceVector& devicesForType = supportedDevices.getDevicesFromType(device);
+ lAddress = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
+ : String8("");
+ }
+
+ mDevice = device;
+
+ ALOGV("opening input for device %08x address %s profile %p name %s",
+ mDevice, lAddress.string(), mProfile.get(), mProfile->getName().string());
+
+ status_t status = mClientInterface->openInput(mProfile->getModuleHandle(),
+ input,
+ &lConfig,
+ &mDevice,
+ lAddress,
+ source,
+ flags);
+ LOG_ALWAYS_FATAL_IF(mDevice != device,
+ "%s openInput returned device %08x when given device %08x",
+ __FUNCTION__, mDevice, device);
+
+ if (status == NO_ERROR) {
+ mSamplingRate = lConfig.sample_rate;
+ mChannelMask = lConfig.channel_mask;
+ mFormat = lConfig.format;
+ mId = AudioPort::getNextUniqueId();
+ mIoHandle = *input;
+ }
+
+ return status;
+}
+
+
+void AudioInputDescriptor::close()
+{
+ if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
+ mClientInterface->closeInput(mIoHandle);
+ }
+}
+
status_t AudioInputDescriptor::dump(int fd)
{
const size_t SIZE = 256;
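
With open()/close() on the descriptor, the caller no longer talks to the client interface or tracks mId/mIoHandle itself. A hedged sketch of how a caller such as the policy manager might drive it; the function and its parameter names are stand-ins for the caller's own state, not part of this change:

    // Hypothetical caller: open an input through the descriptor and keep it on success.
    status_t openInputThroughDescriptor(const sp<IOProfile>& profile,
                                        AudioPolicyClientInterface *clientInterface,
                                        audio_devices_t device, const String8& address,
                                        audio_source_t source, audio_input_flags_t flags,
                                        sp<AudioInputDescriptor> *outDesc,
                                        audio_io_handle_t *outInput) {
        sp<AudioInputDescriptor> inputDesc =
                new AudioInputDescriptor(profile, clientInterface);
        status_t status = inputDesc->open(nullptr /* derive config from the profile */,
                                          device, address, source, flags, outInput);
        if (status != NO_ERROR) {
            return status;        // the HAL rejected the configuration
        }
        *outDesc = inputDesc;     // descriptor now carries id, io handle and negotiated config
        return NO_ERROR;
        // on teardown the owner simply calls inputDesc->close()
    }
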
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 4d3c3b5..f96c5bc 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -23,6 +23,7 @@
#include "AudioGain.h"
#include "Volume.h"
#include "HwModule.h"
+#include <media/AudioParameter.h>
#include <media/AudioPolicy.h>
// A device mask for all audio output devices that are considered "remote" when evaluating
@@ -231,13 +232,6 @@
}
}
-void SwAudioOutputDescriptor::setIoHandle(audio_io_handle_t ioHandle)
-{
- mId = AudioPort::getNextUniqueId();
- mIoHandle = ioHandle;
-}
-
-
status_t SwAudioOutputDescriptor::dump(int fd)
{
const size_t SIZE = 256;
@@ -387,6 +381,87 @@
return changed;
}
+status_t SwAudioOutputDescriptor::open(const audio_config_t *config,
+ audio_devices_t device,
+ const String8& address,
+ audio_stream_type_t stream,
+ audio_output_flags_t flags,
+ audio_io_handle_t *output)
+{
+ audio_config_t lConfig;
+ if (config == nullptr) {
+ lConfig = AUDIO_CONFIG_INITIALIZER;
+ lConfig.sample_rate = mSamplingRate;
+ lConfig.channel_mask = mChannelMask;
+ lConfig.format = mFormat;
+ } else {
+ lConfig = *config;
+ }
+
+ String8 lAddress = address;
+ if (lAddress == "") {
+ const DeviceVector& supportedDevices = mProfile->getSupportedDevices();
+ const DeviceVector& devicesForType = supportedDevices.getDevicesFromType(device);
+ lAddress = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
+ : String8("");
+ }
+
+ mDevice = device;
+ // if the selected profile is offloaded and no offload info was specified,
+ // create a default one
+ if ((mProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) &&
+ lConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) {
+ flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+ lConfig.offload_info = AUDIO_INFO_INITIALIZER;
+ lConfig.offload_info.sample_rate = lConfig.sample_rate;
+ lConfig.offload_info.channel_mask = lConfig.channel_mask;
+ lConfig.offload_info.format = lConfig.format;
+ lConfig.offload_info.stream_type = stream;
+ lConfig.offload_info.duration_us = -1;
+ lConfig.offload_info.has_video = true; // conservative
+ lConfig.offload_info.is_streaming = true; // likely
+ }
+
+ mFlags = (audio_output_flags_t)(mFlags | flags);
+
+ ALOGV("opening output for device %08x address %s profile %p name %s",
+ mDevice, lAddress.string(), mProfile.get(), mProfile->getName().string());
+
+ status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
+ output,
+ &lConfig,
+ &mDevice,
+ lAddress,
+ &mLatency,
+ mFlags);
+ LOG_ALWAYS_FATAL_IF(mDevice != device,
+ "%s openOutput returned device %08x when given device %08x",
+ __FUNCTION__, mDevice, device);
+
+ if (status == NO_ERROR) {
+ mSamplingRate = lConfig.sample_rate;
+ mChannelMask = lConfig.channel_mask;
+ mFormat = lConfig.format;
+ mId = AudioPort::getNextUniqueId();
+ mIoHandle = *output;
+ }
+
+ return status;
+}
+
+
+void SwAudioOutputDescriptor::close()
+{
+ if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
+ AudioParameter param;
+ param.add(String8("closing"), String8("true"));
+ mClientInterface->setParameters(mIoHandle, param.toString());
+
+ mClientInterface->closeOutput(mIoHandle);
+ }
+}
+
+
// HwAudioOutputDescriptor implementation
HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<AudioSourceDescriptor>& source,
AudioPolicyClientInterface *clientInterface)
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index fcf9070..53e694b 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -74,11 +74,11 @@
SortedVector<audio_format_t> flatenedFormats;
SampleRateVector flatenedRates;
ChannelsVector flatenedChannels;
- for (size_t profileIndex = 0; profileIndex < mProfiles.size(); profileIndex++) {
- if (mProfiles[profileIndex]->isValid()) {
- audio_format_t formatToExport = mProfiles[profileIndex]->getFormat();
- const SampleRateVector &ratesToExport = mProfiles[profileIndex]->getSampleRates();
- const ChannelsVector &channelsToExport = mProfiles[profileIndex]->getChannels();
+ for (const auto& profile : mProfiles) {
+ if (profile->isValid()) {
+ audio_format_t formatToExport = profile->getFormat();
+ const SampleRateVector &ratesToExport = profile->getSampleRates();
+ const ChannelsVector &channelsToExport = profile->getChannels();
if (flatenedFormats.indexOf(formatToExport) < 0) {
flatenedFormats.add(formatToExport);
@@ -130,14 +130,12 @@
void AudioPort::importAudioPort(const sp<AudioPort>& port, bool force __unused)
{
- size_t indexToImport;
- for (indexToImport = 0; indexToImport < port->mProfiles.size(); indexToImport++) {
- const sp<AudioProfile> &profileToImport = port->mProfiles[indexToImport];
+ for (const auto& profileToImport : port->mProfiles) {
if (profileToImport->isValid()) {
// Import only valid port, i.e. valid format, non empty rates and channels masks
bool hasSameProfile = false;
- for (size_t profileIndex = 0; profileIndex < mProfiles.size(); profileIndex++) {
- if (*mProfiles[profileIndex] == *profileToImport) {
+ for (const auto& profile : mProfiles) {
+ if (*profile == *profileToImport) {
// never import a profile twice
hasSameProfile = true;
break;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
index 98f7a94..7657c4d 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
@@ -233,8 +233,7 @@
return NO_ERROR;
}
- for (size_t i = 0; i < size(); i++) {
- const sp<AudioProfile> profile = itemAt(i);
+ for (const auto& profile : *this) {
if (profile->checkExact(samplingRate, channelMask, format) == NO_ERROR) {
return NO_ERROR;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index a2c1165..fdeea29 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -60,7 +60,7 @@
void DeviceVector::refreshTypes()
{
mDeviceTypes = AUDIO_DEVICE_NONE;
- for(size_t i = 0; i < size(); i++) {
+ for (size_t i = 0; i < size(); i++) {
mDeviceTypes |= itemAt(i)->type();
}
ALOGV("DeviceVector::refreshTypes() mDeviceTypes %08x", mDeviceTypes);
@@ -68,7 +68,7 @@
ssize_t DeviceVector::indexOf(const sp<DeviceDescriptor>& item) const
{
- for(size_t i = 0; i < size(); i++) {
+ for (size_t i = 0; i < size(); i++) {
if (item->equals(itemAt(i))) {
return i;
}
@@ -78,12 +78,15 @@
void DeviceVector::add(const DeviceVector &devices)
{
- for (size_t i = 0; i < devices.size(); i++) {
- sp<DeviceDescriptor> device = devices.itemAt(i);
+ bool added = false;
+ for (const auto& device : devices) {
if (indexOf(device) < 0 && SortedVector::add(device) >= 0) {
- refreshTypes();
+ added = true;
}
}
+ if (added) {
+ refreshTypes();
+ }
}
ssize_t DeviceVector::add(const sp<DeviceDescriptor>& item)
@@ -148,14 +151,12 @@
sp<DeviceDescriptor> DeviceVector::getDeviceFromId(audio_port_handle_t id) const
{
- sp<DeviceDescriptor> device;
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->getId() == id) {
- device = itemAt(i);
- break;
+ for (const auto& device : *this) {
+ if (device->getId() == id) {
+ return device;
}
}
- return device;
+ return nullptr;
}
DeviceVector DeviceVector::getDevicesFromType(audio_devices_t type) const
@@ -180,11 +181,9 @@
audio_devices_t type, const String8& address) const
{
DeviceVector devices;
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->type() == type) {
- if (itemAt(i)->mAddress == address) {
- devices.add(itemAt(i));
- }
+ for (const auto& device : *this) {
+ if (device->type() == type && device->mAddress == address) {
+ devices.add(device);
}
}
return devices;
@@ -192,14 +191,12 @@
sp<DeviceDescriptor> DeviceVector::getDeviceFromTagName(const String8 &tagName) const
{
- sp<DeviceDescriptor> device;
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->getTagName() == tagName) {
- device = itemAt(i);
- break;
+ for (const auto& device : *this) {
+ if (device->getTagName() == tagName) {
+ return device;
}
}
- return device;
+ return nullptr;
}
status_t DeviceVector::dump(int fd, const String8 &tag, int spaces, bool verbose) const
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index cc56fb8..b4feb4d 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -154,10 +154,9 @@
DeviceVector HwModule::getRouteSourceDevices(const sp<AudioRoute> &route) const
{
DeviceVector sourceDevices;
- Vector <sp<AudioPort> > sources = route->getSources();
- for (size_t i = 0; i < sources.size(); i++) {
- if (sources[i]->getType() == AUDIO_PORT_TYPE_DEVICE) {
- sourceDevices.add(mDeclaredDevices.getDeviceFromTagName(sources[i]->getTagName()));
+ for (const auto& source : route->getSources()) {
+ if (source->getType() == AUDIO_PORT_TYPE_DEVICE) {
+ sourceDevices.add(mDeclaredDevices.getDeviceFromTagName(source->getTagName()));
}
}
return sourceDevices;
@@ -173,17 +172,15 @@
void HwModule::refreshSupportedDevices()
{
// Now updating the streams (aka IOProfile until now) supported devices
- for (size_t i = 0; i < mInputProfiles.size(); i++) {
- sp<IOProfile> stream = mInputProfiles[i];
+ for (const auto& stream : mInputProfiles) {
DeviceVector sourceDevices;
- const AudioRouteVector &routes = stream->getRoutes();
- for (size_t j = 0; j < routes.size(); j++) {
- sp<AudioPort> sink = routes[j]->getSink();
+ for (const auto& route : stream->getRoutes()) {
+ sp<AudioPort> sink = route->getSink();
if (sink == 0 || stream != sink) {
ALOGE("%s: Invalid route attached to input stream", __FUNCTION__);
continue;
}
- DeviceVector sourceDevicesForRoute = getRouteSourceDevices(routes[j]);
+ DeviceVector sourceDevicesForRoute = getRouteSourceDevices(route);
if (sourceDevicesForRoute.isEmpty()) {
ALOGE("%s: invalid source devices for %s", __FUNCTION__, stream->getName().string());
continue;
@@ -196,17 +193,15 @@
}
stream->setSupportedDevices(sourceDevices);
}
- for (size_t i = 0; i < mOutputProfiles.size(); i++) {
- sp<IOProfile> stream = mOutputProfiles[i];
+ for (const auto& stream : mOutputProfiles) {
DeviceVector sinkDevices;
- const AudioRouteVector &routes = stream->getRoutes();
- for (size_t j = 0; j < routes.size(); j++) {
- sp<AudioPort> source = routes[j]->getSources().findByTagName(stream->getTagName());
+ for (const auto& route : stream->getRoutes()) {
+ sp<AudioPort> source = route->getSources().findByTagName(stream->getTagName());
if (source == 0 || stream != source) {
ALOGE("%s: Invalid route attached to output stream", __FUNCTION__);
continue;
}
- sp<DeviceDescriptor> sinkDevice = getRouteSinkDevice(routes[j]);
+ sp<DeviceDescriptor> sinkDevice = getRouteSinkDevice(route);
if (sinkDevice == 0) {
ALOGE("%s: invalid sink device for %s", __FUNCTION__, stream->getName().string());
continue;
@@ -252,60 +247,40 @@
sp <HwModule> HwModuleCollection::getModuleFromName(const char *name) const
{
- sp <HwModule> module;
-
- for (size_t i = 0; i < size(); i++)
- {
- if (strcmp(itemAt(i)->getName(), name) == 0) {
- return itemAt(i);
+ for (const auto& module : *this) {
+ if (strcmp(module->getName(), name) == 0) {
+ return module;
}
}
- return module;
+ return nullptr;
}
-
sp <HwModule> HwModuleCollection::getModuleForDevice(audio_devices_t device) const
{
- sp <HwModule> module;
-
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->getHandle() == 0) {
- continue;
- }
- if (audio_is_output_device(device)) {
- for (size_t j = 0; j < itemAt(i)->mOutputProfiles.size(); j++)
- {
- if (itemAt(i)->mOutputProfiles[j]->supportDevice(device)) {
- return itemAt(i);
- }
- }
- } else {
- for (size_t j = 0; j < itemAt(i)->mInputProfiles.size(); j++) {
- if (itemAt(i)->mInputProfiles[j]->supportDevice(device)) {
- return itemAt(i);
- }
+ for (const auto& module : *this) {
+ IOProfileCollection& profiles = audio_is_output_device(device) ?
+ module->mOutputProfiles : module->mInputProfiles;
+ for (const auto& profile : profiles) {
+ if (profile->supportDevice(device)) {
+ return module;
}
}
}
- return module;
+ return nullptr;
}
-sp<DeviceDescriptor> HwModuleCollection::getDeviceDescriptor(const audio_devices_t device,
- const char *device_address,
- const char *device_name,
- bool matchAdress) const
+sp<DeviceDescriptor> HwModuleCollection::getDeviceDescriptor(const audio_devices_t device,
+ const char *device_address,
+ const char *device_name,
+ bool matchAdress) const
{
- String8 address = (device_address == NULL) ? String8("") : String8(device_address);
+ String8 address = (device_address == nullptr) ? String8("") : String8(device_address);
// handle legacy remote submix case where the address was not always specified
if (device_distinguishes_on_address(device) && (address.length() == 0)) {
address = String8("0");
}
- for (size_t i = 0; i < size(); i++) {
- const sp<HwModule> hwModule = itemAt(i);
- if (hwModule->mHandle == 0) {
- continue;
- }
+ for (const auto& hwModule : *this) {
DeviceVector declaredDevices = hwModule->getDeclaredDevices();
DeviceVector deviceList = declaredDevices.getDevicesFromTypeAddr(device, address);
if (!deviceList.isEmpty()) {
@@ -340,4 +315,5 @@
return NO_ERROR;
}
+
} //namespace android
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 62cbfc1..7366378 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -88,7 +88,7 @@
const char *device_name)
{
ALOGV("setDeviceConnectionStateInt() device: 0x%X, state %d, address %s name %s",
- device, state, device_address, device_name);
+ device, state, device_address, device_name);
// connect/disconnect only 1 device at a time
if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
@@ -183,14 +183,14 @@
checkOutputForAllStrategies();
// outputs must be closed after checkOutputForAllStrategies() is executed
if (!outputs.isEmpty()) {
- for (size_t i = 0; i < outputs.size(); i++) {
- sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
+ for (audio_io_handle_t output : outputs) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
// close unused outputs after device disconnection or direct outputs that have been
// opened by checkOutputsForDevice() to query dynamic parameters
if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
(((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
(desc->mDirectOpenCount == 0))) {
- closeOutput(outputs[i]);
+ closeOutput(output);
}
}
// check again after closing A2DP output to reset mA2dpSuspended if needed
@@ -499,9 +499,7 @@
// FIXME: would be better to refine to only inputs whose profile connects to the
// call TX device but this information is not in the audio patch and logic here must be
// symmetric to the one in startInput()
- Vector<sp <AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
- for (size_t i = 0; i < activeInputs.size(); i++) {
- sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+ for (const auto& activeDesc : mInputs.getActiveInputs()) {
if (activeDesc->hasSameHwModuleAs(txSourceDeviceDesc)) {
AudioSessionCollection activeSessions =
activeDesc->getAudioSessions(true /*activeOnly*/);
@@ -683,9 +681,7 @@
}
}
- Vector<sp <AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
- for (size_t i = 0; i < activeInputs.size(); i++) {
- sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+ for (const auto& activeDesc : mInputs.getActiveInputs()) {
audio_devices_t newDevice = getNewInputDevice(activeDesc);
// Force new input selection if the new device can not be reached via current input
if (activeDesc->mProfile->getSupportedDevices().types() &
@@ -721,12 +717,8 @@
sp<IOProfile> profile;
- for (size_t i = 0; i < mHwModules.size(); i++) {
- if (mHwModules[i]->mHandle == 0) {
- continue;
- }
- for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++) {
- sp<IOProfile> curProfile = mHwModules[i]->mOutputProfiles[j];
+ for (const auto& hwModule : mHwModules) {
+ for (const auto& curProfile : hwModule->mOutputProfiles) {
if (!curProfile->isCompatibleProfile(device, String8(""),
samplingRate, NULL /*updatedSamplingRate*/,
format, NULL /*updatedFormat*/,
@@ -827,12 +819,7 @@
// Explicit routing?
sp<DeviceDescriptor> deviceDesc;
if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
- for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
- if (mAvailableOutputDevices[i]->getId() == *selectedDeviceId) {
- deviceDesc = mAvailableOutputDevices[i];
- break;
- }
- }
+ deviceDesc = mAvailableOutputDevices.getDeviceFromId(*selectedDeviceId);
}
mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);
@@ -843,12 +830,10 @@
flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
}
- ALOGV("getOutputForAttr() device 0x%x, samplingRate %d, format %x, channelMask %x, flags %x",
+ ALOGV("getOutputForAttr() device 0x%x, sampling rate %d, format %x, channel mask %x, flags %x",
device, config->sample_rate, config->format, config->channel_mask, flags);
- *output = getOutputForDevice(device, session, *stream,
- config->sample_rate, config->format, config->channel_mask,
- flags, &config->offload_info);
+ *output = getOutputForDevice(device, session, *stream, config, flags);
if (*output == AUDIO_IO_HANDLE_NONE) {
mOutputRoutes.removeRoute(session);
return INVALID_OPERATION;
@@ -867,11 +852,8 @@
audio_devices_t device,
audio_session_t session,
audio_stream_type_t stream,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo)
+ const audio_config_t *config,
+ audio_output_flags_t flags)
{
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
status_t status;
@@ -898,7 +880,7 @@
if (stream == AUDIO_STREAM_TTS) {
flags = AUDIO_OUTPUT_FLAG_TTS;
} else if (stream == AUDIO_STREAM_VOICE_CALL &&
- audio_is_linear_pcm(format)) {
+ audio_is_linear_pcm(config->format)) {
flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_VOIP_RX |
AUDIO_OUTPUT_FLAG_DIRECT);
ALOGV("Set VoIP and Direct output flags for PCM format");
@@ -909,8 +891,8 @@
// skip direct output selection if the request can obviously be attached to a mixed output
// and not explicitly requested
if (((flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
- audio_is_linear_pcm(format) && samplingRate <= SAMPLE_RATE_HZ_MAX &&
- audio_channel_count_from_out_mask(channelMask) <= 2) {
+ audio_is_linear_pcm(config->format) && config->sample_rate <= SAMPLE_RATE_HZ_MAX &&
+ audio_channel_count_from_out_mask(config->channel_mask) <= 2) {
goto non_direct_output;
}
@@ -924,9 +906,9 @@
if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
!(mEffects.isNonOffloadableEffectEnabled() || mMasterMono)) {
profile = getProfileForDirectOutput(device,
- samplingRate,
- format,
- channelMask,
+ config->sample_rate,
+ config->format,
+ config->channel_mask,
(audio_output_flags_t)flags);
}
@@ -939,9 +921,9 @@
outputDesc = desc;
// reuse direct output if currently open by the same client
// and configured with same parameters
- if ((samplingRate == outputDesc->mSamplingRate) &&
- audio_formats_match(format, outputDesc->mFormat) &&
- (channelMask == outputDesc->mChannelMask)) {
+ if ((config->sample_rate == outputDesc->mSamplingRate) &&
+ audio_formats_match(config->format, outputDesc->mFormat) &&
+ (config->channel_mask == outputDesc->mChannelMask)) {
if (session == outputDesc->mDirectClientSession) {
outputDesc->mDirectOpenCount++;
ALOGV("getOutputForDevice() reusing direct output %d for session %d",
@@ -961,65 +943,29 @@
closeOutput(outputDesc->mIoHandle);
}
- // if the selected profile is offloaded and no offload info was specified,
- // create a default one
- audio_offload_info_t defaultOffloadInfo = AUDIO_INFO_INITIALIZER;
- if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) && !offloadInfo) {
- flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
- defaultOffloadInfo.sample_rate = samplingRate;
- defaultOffloadInfo.channel_mask = channelMask;
- defaultOffloadInfo.format = format;
- defaultOffloadInfo.stream_type = stream;
- defaultOffloadInfo.bit_rate = 0;
- defaultOffloadInfo.duration_us = -1;
- defaultOffloadInfo.has_video = true; // conservative
- defaultOffloadInfo.is_streaming = true; // likely
- offloadInfo = &defaultOffloadInfo;
- }
-
outputDesc = new SwAudioOutputDescriptor(profile, mpClientInterface);
- outputDesc->mDevice = device;
- outputDesc->mLatency = 0;
- outputDesc->mFlags = (audio_output_flags_t)(outputDesc->mFlags | flags);
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = samplingRate;
- config.channel_mask = channelMask;
- config.format = format;
- if (offloadInfo != NULL) {
- config.offload_info = *offloadInfo;
- }
- DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromType(device);
- String8 address = outputDevices.size() > 0 ? outputDevices.itemAt(0)->mAddress
- : String8("");
- status = mpClientInterface->openOutput(profile->getModuleHandle(),
- &output,
- &config,
- &outputDesc->mDevice,
- address,
- &outputDesc->mLatency,
- outputDesc->mFlags);
+ status = outputDesc->open(config, device, String8(""), stream, flags, &output);
// only accept an output with the requested parameters
if (status != NO_ERROR ||
- (samplingRate != 0 && samplingRate != config.sample_rate) ||
- (format != AUDIO_FORMAT_DEFAULT && !audio_formats_match(format, config.format)) ||
- (channelMask != 0 && channelMask != config.channel_mask)) {
- ALOGV("getOutputForDevice() failed opening direct output: output %d samplingRate %d %d,"
- "format %d %d, channelMask %04x %04x", output, samplingRate,
- outputDesc->mSamplingRate, format, outputDesc->mFormat, channelMask,
- outputDesc->mChannelMask);
+ (config->sample_rate != 0 && config->sample_rate != outputDesc->mSamplingRate) ||
+ (config->format != AUDIO_FORMAT_DEFAULT &&
+ !audio_formats_match(config->format, outputDesc->mFormat)) ||
+ (config->channel_mask != 0 && config->channel_mask != outputDesc->mChannelMask)) {
+ ALOGV("getOutputForDevice() failed opening direct output: output %d sample rate %d %d,"
+ "format %d %d, channel mask %04x %04x", output, config->sample_rate,
+ outputDesc->mSamplingRate, config->format, outputDesc->mFormat,
+ config->channel_mask, outputDesc->mChannelMask);
if (output != AUDIO_IO_HANDLE_NONE) {
- mpClientInterface->closeOutput(output);
+ outputDesc->close();
}
// fall back to mixer output if possible when the direct output could not be open
- if (audio_is_linear_pcm(format) && samplingRate <= SAMPLE_RATE_HZ_MAX) {
+ if (audio_is_linear_pcm(config->format) &&
+ config->sample_rate <= SAMPLE_RATE_HZ_MAX) {
goto non_direct_output;
}
return AUDIO_IO_HANDLE_NONE;
}
- outputDesc->mSamplingRate = config.sample_rate;
- outputDesc->mChannelMask = config.channel_mask;
- outputDesc->mFormat = config.format;
outputDesc->mRefCount[stream] = 0;
outputDesc->mStopTime[stream] = 0;
outputDesc->mDirectOpenCount = 1;
@@ -1045,18 +991,18 @@
// open a non direct output
// for non direct outputs, only PCM is supported
- if (audio_is_linear_pcm(format)) {
+ if (audio_is_linear_pcm(config->format)) {
// get which output is suitable for the specified stream. The actual
// routing change will happen when startOutput() will be called
SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
// at this stage we should ignore the DIRECT flag as no direct output could be found earlier
flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
- output = selectOutput(outputs, flags, format);
+ output = selectOutput(outputs, flags, config->format);
}
ALOGW_IF((output == 0), "getOutputForDevice() could not find output for stream %d, "
- "samplingRate %d, format %d, channels %x, flags %x",
- stream, samplingRate, format, channelMask, flags);
+ "sampling rate %d, format %d, channels %x, flags %x",
+ stream, config->sample_rate, config->format, config->channel_mask, flags);
return output;
}
@@ -1074,21 +1020,21 @@
// 4: the first output in the list
if (outputs.size() == 0) {
- return 0;
+ return AUDIO_IO_HANDLE_NONE;
}
if (outputs.size() == 1) {
return outputs[0];
}
int maxCommonFlags = 0;
- audio_io_handle_t outputForFlags = 0;
- audio_io_handle_t outputForPrimary = 0;
- audio_io_handle_t outputForFormat = 0;
+ audio_io_handle_t outputForFlags = AUDIO_IO_HANDLE_NONE;
+ audio_io_handle_t outputForPrimary = AUDIO_IO_HANDLE_NONE;
+ audio_io_handle_t outputForFormat = AUDIO_IO_HANDLE_NONE;
audio_format_t bestFormat = AUDIO_FORMAT_INVALID;
audio_format_t bestFormatForFlags = AUDIO_FORMAT_INVALID;
- for (size_t i = 0; i < outputs.size(); i++) {
- sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
+ for (audio_io_handle_t output : outputs) {
+ sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
if (!outputDesc->isDuplicated()) {
// if a valid format is specified, skip output if not compatible
if (format != AUDIO_FORMAT_INVALID) {
@@ -1101,7 +1047,7 @@
}
if (AudioPort::isBetterFormatMatch(
outputDesc->mFormat, bestFormat, format)) {
- outputForFormat = outputs[i];
+ outputForFormat = output;
bestFormat = outputDesc->mFormat;
}
}
@@ -1112,29 +1058,29 @@
if (format != AUDIO_FORMAT_INVALID
&& AudioPort::isBetterFormatMatch(
outputDesc->mFormat, bestFormatForFlags, format)) {
- outputForFlags = outputs[i];
+ outputForFlags = output;
bestFormatForFlags = outputDesc->mFormat;
}
} else {
- outputForFlags = outputs[i];
+ outputForFlags = output;
maxCommonFlags = commonFlags;
bestFormatForFlags = outputDesc->mFormat;
}
- ALOGV("selectOutput() commonFlags for output %d, %04x", outputs[i], commonFlags);
+ ALOGV("selectOutput() commonFlags for output %d, %04x", output, commonFlags);
}
if (outputDesc->mProfile->getFlags() & AUDIO_OUTPUT_FLAG_PRIMARY) {
- outputForPrimary = outputs[i];
+ outputForPrimary = output;
}
}
}
- if (outputForFlags != 0) {
+ if (outputForFlags != AUDIO_IO_HANDLE_NONE) {
return outputForFlags;
}
- if (outputForFormat != 0) {
+ if (outputForFormat != AUDIO_IO_HANDLE_NONE) {
return outputForFormat;
}
- if (outputForPrimary != 0) {
+ if (outputForPrimary != AUDIO_IO_HANDLE_NONE) {
return outputForPrimary;
}
@@ -1473,7 +1419,7 @@
input_type_t *inputType,
audio_port_handle_t *portId)
{
- ALOGV("getInputForAttr() source %d, samplingRate %d, format %d, channelMask %x,"
+ ALOGV("getInputForAttr() source %d, sampling rate %d, format %d, channel mask %x,"
"session %d, flags %#x",
attr->source, config->sample_rate, config->format, config->channel_mask, session, flags);
@@ -1485,15 +1431,14 @@
AudioMix *policyMix = NULL;
DeviceVector inputDevices;
+ if (inputSource == AUDIO_SOURCE_DEFAULT) {
+ inputSource = AUDIO_SOURCE_MIC;
+ }
+
// Explicit routing?
sp<DeviceDescriptor> deviceDesc;
if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
- for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
- if (mAvailableInputDevices[i]->getId() == *selectedDeviceId) {
- deviceDesc = mAvailableInputDevices[i];
- break;
- }
- }
+ deviceDesc = mAvailableInputDevices.getDeviceFromId(*selectedDeviceId);
}
mInputRoutes.addRoute(session, SessionRoute::STREAM_TYPE_NA, inputSource, deviceDesc, uid);
@@ -1541,9 +1486,6 @@
*input = AUDIO_IO_HANDLE_NONE;
*inputType = API_INPUT_INVALID;
- if (inputSource == AUDIO_SOURCE_DEFAULT) {
- inputSource = AUDIO_SOURCE_MIC;
- }
halInputSource = inputSource;
// TODO: check for existing client for this port ID
@@ -1593,7 +1535,7 @@
}
*input = getInputForDevice(device, address, session, uid, inputSource,
- config->sample_rate, config->format, config->channel_mask, flags,
+ config, flags,
policyMix);
if (*input == AUDIO_IO_HANDLE_NONE) {
status = INVALID_OPERATION;
@@ -1620,9 +1562,7 @@
audio_session_t session,
uid_t uid,
audio_source_t inputSource,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_base_t *config,
audio_input_flags_t flags,
AudioMix *policyMix)
{
@@ -1641,16 +1581,17 @@
halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
}
} else if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION &&
- audio_is_linear_pcm(format)) {
+ audio_is_linear_pcm(config->format)) {
flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_VOIP_TX);
}
// find a compatible input profile (not necessarily identical in parameters)
sp<IOProfile> profile;
- // samplingRate and flags may be updated by getInputProfile
- uint32_t profileSamplingRate = (samplingRate == 0) ? SAMPLE_RATE_HZ_DEFAULT : samplingRate;
- audio_format_t profileFormat = format;
- audio_channel_mask_t profileChannelMask = channelMask;
+ // sampling rate and flags may be updated by getInputProfile
+ uint32_t profileSamplingRate = (config->sample_rate == 0) ?
+ SAMPLE_RATE_HZ_DEFAULT : config->sample_rate;
+ audio_format_t profileFormat = config->format;
+ audio_channel_mask_t profileChannelMask = config->channel_mask;
audio_input_flags_t profileFlags = flags;
for (;;) {
profile = getInputProfile(device, address,
@@ -1664,12 +1605,13 @@
profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
} else { // fail
ALOGW("getInputForDevice() could not find profile for device 0x%X, "
- "samplingRate %u, format %#x, channelMask 0x%X, flags %#x",
- device, samplingRate, format, channelMask, flags);
+ "sampling rate %u, format %#x, channel mask 0x%X, flags %#x",
+ device, config->sample_rate, config->format, config->channel_mask, flags);
return input;
}
}
// Pick input sampling rate if not specified by client
+ uint32_t samplingRate = config->sample_rate;
if (samplingRate == 0) {
samplingRate = profileSamplingRate;
}
@@ -1680,14 +1622,14 @@
}
sp<AudioSession> audioSession = new AudioSession(session,
- inputSource,
- format,
- samplingRate,
- channelMask,
- flags,
- uid,
- isSoundTrigger,
- policyMix, mpClientInterface);
+ inputSource,
+ config->format,
+ samplingRate,
+ config->channel_mask,
+ flags,
+ uid,
+ isSoundTrigger,
+ policyMix, mpClientInterface);
// FIXME: disable concurrent capture until UI is ready
#if 0
@@ -1731,8 +1673,8 @@
// can be selected.
if (!isConcurrentSource(inputSource) &&
((desc->mSamplingRate != samplingRate ||
- desc->mChannelMask != channelMask ||
- !audio_formats_match(desc->mFormat, format)) &&
+ desc->mChannelMask != config->channel_mask ||
+ !audio_formats_match(desc->mFormat, config->format)) &&
(source_priority(desc->getHighestPrioritySource(false /*activeOnly*/)) <
source_priority(inputSource)))) {
reusedInputDesc = desc;
@@ -1755,44 +1697,30 @@
}
#endif
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = profileSamplingRate;
- config.channel_mask = profileChannelMask;
- config.format = profileFormat;
+ sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile, mpClientInterface);
- if (address == "") {
- DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(device);
- // the inputs vector must be of size 1, but we don't want to crash here
- address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress : String8("");
- }
+ audio_config_t lConfig = AUDIO_CONFIG_INITIALIZER;
+ lConfig.sample_rate = profileSamplingRate;
+ lConfig.channel_mask = profileChannelMask;
+ lConfig.format = profileFormat;
- status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
- &input,
- &config,
- &device,
- address,
- halInputSource,
- profileFlags);
+ status_t status = inputDesc->open(&lConfig, device, address,
+ halInputSource, profileFlags, &input);
// only accept input with the exact requested set of parameters
if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE ||
- (profileSamplingRate != config.sample_rate) ||
- !audio_formats_match(profileFormat, config.format) ||
- (profileChannelMask != config.channel_mask)) {
- ALOGW("getInputForAttr() failed opening input: samplingRate %d"
- ", format %d, channelMask %x",
- samplingRate, format, channelMask);
+ (profileSamplingRate != lConfig.sample_rate) ||
+ !audio_formats_match(profileFormat, lConfig.format) ||
+ (profileChannelMask != lConfig.channel_mask)) {
+ ALOGW("getInputForAttr() failed opening input: sampling rate %d"
+ ", format %d, channel mask %x",
+ profileSamplingRate, profileFormat, profileChannelMask);
if (input != AUDIO_IO_HANDLE_NONE) {
- mpClientInterface->closeInput(input);
+ inputDesc->close();
}
return AUDIO_IO_HANDLE_NONE;
}
- sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile);
- inputDesc->mSamplingRate = profileSamplingRate;
- inputDesc->mFormat = profileFormat;
- inputDesc->mChannelMask = profileChannelMask;
- inputDesc->mDevice = device;
inputDesc->mPolicyMix = policyMix;
inputDesc->addAudioSession(session, audioSession);
@@ -1833,9 +1761,7 @@
return true;
}
- Vector< sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
- for (size_t i = 0; i < activeInputs.size(); i++) {
- sp<AudioInputDescriptor> activeInput = activeInputs[i];
+ for (const auto& activeInput : mInputs.getActiveInputs()) {
if (!isConcurrentSource(activeInput->inputSource(true)) &&
!is_virtual_input_device(activeInput->mDevice)) {
return false;
@@ -1921,9 +1847,7 @@
}
Vector< sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
- for (size_t i = 0; i < activeInputs.size(); i++) {
- sp<AudioInputDescriptor> activeDesc = activeInputs[i];
-
+ for (const auto& activeDesc : activeInputs) {
if (is_virtual_input_device(activeDesc->mDevice)) {
continue;
}
@@ -1963,9 +1887,7 @@
inputDesc->isSoundTrigger() ? soundTriggerSupportsConcurrentCapture() : false;
// if capture is allowed, preempt currently active HOTWORD captures
- for (size_t i = 0; i < activeInputs.size(); i++) {
- sp<AudioInputDescriptor> activeDesc = activeInputs[i];
-
+ for (const auto& activeDesc : activeInputs) {
if (is_virtual_input_device(activeDesc->mDevice)) {
continue;
}
@@ -2160,7 +2082,7 @@
void AudioPolicyManager::closeAllInputs() {
bool patchRemoved = false;
- for(size_t input_index = 0; input_index < mInputs.size(); input_index++) {
+ for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {
sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(input_index);
ssize_t patch_index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
if (patch_index >= 0) {
@@ -2169,7 +2091,7 @@
mAudioPatches.removeItemsAt(patch_index);
patchRemoved = true;
}
- mpClientInterface->closeInput(mInputs.keyAt(input_index));
+ inputDesc->close();
}
mInputs.clear();
SoundTrigger::setCaptureState(false);
@@ -2324,21 +2246,21 @@
audio_io_handle_t outputDeepBuffer = AUDIO_IO_HANDLE_NONE;
audio_io_handle_t outputPrimary = AUDIO_IO_HANDLE_NONE;
- for (size_t i = 0; i < outputs.size(); i++) {
- sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
+ for (audio_io_handle_t output : outputs) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
if (activeOnly && !desc->isStreamActive(AUDIO_STREAM_MUSIC)) {
continue;
}
- ALOGV("selectOutputForMusicEffects activeOnly %d outputs[%zu] flags 0x%08x",
- activeOnly, i, desc->mFlags);
+ ALOGV("selectOutputForMusicEffects activeOnly %d output %d flags 0x%08x",
+ activeOnly, output, desc->mFlags);
if ((desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
- outputOffloaded = outputs[i];
+ outputOffloaded = output;
}
if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
- outputDeepBuffer = outputs[i];
+ outputDeepBuffer = output;
}
if ((desc->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) != 0) {
- outputPrimary = outputs[i];
+ outputPrimary = output;
}
}
if (outputOffloaded != AUDIO_IO_HANDLE_NONE) {
@@ -2449,23 +2371,16 @@
break;
}
if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
- // Loop back through "remote submix"
- if (rSubmixModule == 0) {
- for (size_t j = 0; i < mHwModules.size(); j++) {
- if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[j]->mName) == 0
- && mHwModules[j]->mHandle != 0) {
- rSubmixModule = mHwModules[j];
- break;
- }
- }
- }
-
ALOGV("registerPolicyMixes() mix %zu of %zu is LOOP_BACK", i, mixes.size());
-
if (rSubmixModule == 0) {
- ALOGE(" Unable to find audio module for submix, aborting mix %zu registration", i);
- res = INVALID_OPERATION;
- break;
+ rSubmixModule = mHwModules.getModuleFromName(
+ AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX);
+ if (rSubmixModule == 0) {
+ ALOGE(" Unable to find audio module for submix, aborting mix %zu registration",
+ i);
+ res = INVALID_OPERATION;
+ break;
+ }
}
String8 address = mixes[i].mDeviceAddress;
@@ -2544,24 +2459,19 @@
status_t res = NO_ERROR;
sp<HwModule> rSubmixModule;
// examine each mix's route type
- for (size_t i = 0; i < mixes.size(); i++) {
- if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
+ for (const auto& mix : mixes) {
+ if ((mix.mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
if (rSubmixModule == 0) {
- for (size_t j = 0; i < mHwModules.size(); j++) {
- if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[j]->mName) == 0
- && mHwModules[j]->mHandle != 0) {
- rSubmixModule = mHwModules[j];
- break;
- }
+ rSubmixModule = mHwModules.getModuleFromName(
+ AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX);
+ if (rSubmixModule == 0) {
+ res = INVALID_OPERATION;
+ continue;
}
}
- if (rSubmixModule == 0) {
- res = INVALID_OPERATION;
- continue;
- }
- String8 address = mixes[i].mDeviceAddress;
+ String8 address = mix.mDeviceAddress;
if (mPolicyMixes.unregisterMix(address) != NO_ERROR) {
res = INVALID_OPERATION;
@@ -2583,8 +2493,8 @@
rSubmixModule->removeOutputProfile(address);
rSubmixModule->removeInputProfile(address);
- } if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
- if (mPolicyMixes.unregisterMix(mixes[i].mDeviceAddress) != NO_ERROR) {
+ } if ((mix.mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
+ if (mPolicyMixes.unregisterMix(mix.mDeviceAddress) != NO_ERROR) {
res = INVALID_OPERATION;
continue;
}
@@ -2636,7 +2546,7 @@
mAvailableOutputDevices.dump(fd, String8("Available output"));
mAvailableInputDevices.dump(fd, String8("Available input"));
- mHwModules.dump(fd);
+ mHwModulesAll.dump(fd);
mOutputs.dump(fd);
mInputs.dump(fd);
mVolumeCurves->dump(fd);
@@ -2741,23 +2651,23 @@
// do not report devices with type AUDIO_DEVICE_IN_STUB or AUDIO_DEVICE_OUT_STUB
// as they are used by stub HALs by convention
if (role == AUDIO_PORT_ROLE_SINK || role == AUDIO_PORT_ROLE_NONE) {
- for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
- if (mAvailableOutputDevices[i]->type() == AUDIO_DEVICE_OUT_STUB) {
+ for (const auto& dev : mAvailableOutputDevices) {
+ if (dev->type() == AUDIO_DEVICE_OUT_STUB) {
continue;
}
if (portsWritten < portsMax) {
- mAvailableOutputDevices[i]->toAudioPort(&ports[portsWritten++]);
+ dev->toAudioPort(&ports[portsWritten++]);
}
(*num_ports)++;
}
}
if (role == AUDIO_PORT_ROLE_SOURCE || role == AUDIO_PORT_ROLE_NONE) {
- for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
- if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_STUB) {
+ for (const auto& dev : mAvailableInputDevices) {
+ if (dev->type() == AUDIO_DEVICE_IN_STUB) {
continue;
}
if (portsWritten < portsMax) {
- mAvailableInputDevices[i]->toAudioPort(&ports[portsWritten++]);
+ dev->toAudioPort(&ports[portsWritten++]);
}
(*num_ports)++;
}
@@ -3252,8 +3162,8 @@
}
}
// reroute outputs if necessary
- for (size_t i = 0; i < affectedStrategies.size(); i++) {
- checkStrategyRoute(affectedStrategies[i], AUDIO_IO_HANDLE_NONE);
+ for (const auto& strategy : affectedStrategies) {
+ checkStrategyRoute(strategy, AUDIO_IO_HANDLE_NONE);
}
// remove input routes associated with this uid
@@ -3275,8 +3185,8 @@
inputsToClose.add(inputDesc->mIoHandle);
}
}
- for (size_t i = 0; i < inputsToClose.size(); i++) {
- closeInput(inputsToClose[i]);
+ for (const auto& input : inputsToClose) {
+ closeInput(input);
}
}
@@ -3453,8 +3363,8 @@
offloaded.push(desc->mIoHandle);
}
}
- for (size_t i = 0; i < offloaded.size(); ++i) {
- closeOutput(offloaded[i]);
+ for (const auto& handle : offloaded) {
+ closeOutput(handle);
}
}
// update master mono for all remaining outputs
@@ -3580,13 +3490,13 @@
#ifdef USE_XML_AUDIO_POLICY_CONF
mVolumeCurves = new VolumeCurvesCollection();
- AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
+ AudioPolicyConfig config(mHwModulesAll, mAvailableOutputDevices, mAvailableInputDevices,
mDefaultOutputDevice, speakerDrcEnabled,
static_cast<VolumeCurvesCollection *>(mVolumeCurves));
if (deserializeAudioPolicyXmlConfig(config) != NO_ERROR) {
#else
mVolumeCurves = new StreamDescriptorCollection();
- AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
+ AudioPolicyConfig config(mHwModulesAll, mAvailableOutputDevices, mAvailableInputDevices,
mDefaultOutputDevice, speakerDrcEnabled);
if ((ConfigParsingUtils::loadConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE, config) != NO_ERROR) &&
(ConfigParsingUtils::loadConfig(AUDIO_POLICY_CONFIG_FILE, config) != NO_ERROR)) {
@@ -3618,22 +3528,20 @@
// open all output streams needed to access attached devices
audio_devices_t outputDeviceTypes = mAvailableOutputDevices.types();
audio_devices_t inputDeviceTypes = mAvailableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
- for (size_t i = 0; i < mHwModules.size(); i++) {
- mHwModules[i]->mHandle = mpClientInterface->loadHwModule(mHwModules[i]->getName());
- if (mHwModules[i]->mHandle == 0) {
- ALOGW("could not open HW module %s", mHwModules[i]->getName());
+ for (const auto& hwModule : mHwModulesAll) {
+ hwModule->mHandle = mpClientInterface->loadHwModule(hwModule->getName());
+ if (hwModule->getHandle() == AUDIO_MODULE_HANDLE_NONE) {
+ ALOGW("could not open HW module %s", hwModule->getName());
continue;
}
+ mHwModules.push_back(hwModule);
// open all output streams needed to access attached devices
// except for direct output streams that are only opened when they are actually
// required by an app.
// This also validates mAvailableOutputDevices list
- for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
- {
- const sp<IOProfile> outProfile = mHwModules[i]->mOutputProfiles[j];
-
+ for (const auto& outProfile : hwModule->mOutputProfiles) {
if (!outProfile->hasSupportedDevices()) {
- ALOGW("Output profile contains no device on module %s", mHwModules[i]->getName());
+ ALOGW("Output profile contains no device on module %s", hwModule->getName());
continue;
}
if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_TTS) != 0) {
@@ -3660,35 +3568,20 @@
const DeviceVector &devicesForType = supportedDevices.getDevicesFromType(profileType);
String8 address = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
: String8("");
-
- outputDesc->mDevice = profileType;
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = outputDesc->mSamplingRate;
- config.channel_mask = outputDesc->mChannelMask;
- config.format = outputDesc->mFormat;
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = mpClientInterface->openOutput(outProfile->getModuleHandle(),
- &output,
- &config,
- &outputDesc->mDevice,
- address,
- &outputDesc->mLatency,
- outputDesc->mFlags);
+ status_t status = outputDesc->open(nullptr, profileType, address,
+ AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
if (status != NO_ERROR) {
ALOGW("Cannot open output stream for device %08x on hw module %s",
outputDesc->mDevice,
- mHwModules[i]->getName());
+ hwModule->getName());
} else {
- outputDesc->mSamplingRate = config.sample_rate;
- outputDesc->mChannelMask = config.channel_mask;
- outputDesc->mFormat = config.format;
-
- for (size_t k = 0; k < supportedDevices.size(); k++) {
- ssize_t index = mAvailableOutputDevices.indexOf(supportedDevices[k]);
+ for (const auto& dev : supportedDevices) {
+ ssize_t index = mAvailableOutputDevices.indexOf(dev);
// give a valid ID to an attached device once confirmed it is reachable
if (index >= 0 && !mAvailableOutputDevices[index]->isAttached()) {
- mAvailableOutputDevices[index]->attach(mHwModules[i]);
+ mAvailableOutputDevices[index]->attach(hwModule);
}
}
if (mPrimaryOutput == 0 &&
@@ -3697,21 +3590,18 @@
}
addOutput(output, outputDesc);
setOutputDevice(outputDesc,
- outputDesc->mDevice,
+ profileType,
true,
0,
NULL,
- address.string());
+ address);
}
}
// open input streams needed to access attached devices to validate
// mAvailableInputDevices list
- for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++)
- {
- const sp<IOProfile> inProfile = mHwModules[i]->mInputProfiles[j];
-
+ for (const auto& inProfile : hwModule->mInputProfiles) {
if (!inProfile->hasSupportedDevices()) {
- ALOGW("Input profile contains no device on module %s", mHwModules[i]->getName());
+ ALOGW("Input profile contains no device on module %s", hwModule->getName());
continue;
}
// chose first device present in profile's SupportedDevices also part of
@@ -3722,49 +3612,33 @@
continue;
}
sp<AudioInputDescriptor> inputDesc =
- new AudioInputDescriptor(inProfile);
+ new AudioInputDescriptor(inProfile, mpClientInterface);
- inputDesc->mDevice = profileType;
-
- // find the address
- DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(profileType);
- // the inputs vector must be of size 1, but we don't want to crash here
- String8 address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress
- : String8("");
- ALOGV(" for input device 0x%x using address %s", profileType, address.string());
- ALOGE_IF(inputDevices.size() == 0, "Input device list is empty!");
-
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = inputDesc->mSamplingRate;
- config.channel_mask = inputDesc->mChannelMask;
- config.format = inputDesc->mFormat;
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
- status_t status = mpClientInterface->openInput(inProfile->getModuleHandle(),
- &input,
- &config,
- &inputDesc->mDevice,
- address,
- AUDIO_SOURCE_MIC,
- AUDIO_INPUT_FLAG_NONE);
+ status_t status = inputDesc->open(nullptr,
+ profileType,
+ String8(""),
+ AUDIO_SOURCE_MIC,
+ AUDIO_INPUT_FLAG_NONE,
+ &input);
if (status == NO_ERROR) {
- const DeviceVector &supportedDevices = inProfile->getSupportedDevices();
- for (size_t k = 0; k < supportedDevices.size(); k++) {
- ssize_t index = mAvailableInputDevices.indexOf(supportedDevices[k]);
+ for (const auto& dev : inProfile->getSupportedDevices()) {
+ ssize_t index = mAvailableInputDevices.indexOf(dev);
// give a valid ID to an attached device once confirmed it is reachable
if (index >= 0) {
sp<DeviceDescriptor> devDesc = mAvailableInputDevices[index];
if (!devDesc->isAttached()) {
- devDesc->attach(mHwModules[i]);
+ devDesc->attach(hwModule);
devDesc->importAudioPort(inProfile, true);
}
}
}
- mpClientInterface->closeInput(input);
+ inputDesc->close();
} else {
ALOGW("Cannot open input stream for device %08x on hw module %s",
- inputDesc->mDevice,
- mHwModules[i]->getName());
+ profileType,
+ hwModule->getName());
}
}
}
@@ -3804,16 +3678,17 @@
AudioPolicyManager::~AudioPolicyManager()
{
for (size_t i = 0; i < mOutputs.size(); i++) {
- mpClientInterface->closeOutput(mOutputs.keyAt(i));
+ mOutputs.valueAt(i)->close();
}
for (size_t i = 0; i < mInputs.size(); i++) {
- mpClientInterface->closeInput(mInputs.keyAt(i));
+ mInputs.valueAt(i)->close();
}
mAvailableOutputDevices.clear();
mAvailableInputDevices.clear();
mOutputs.clear();
mInputs.clear();
mHwModules.clear();
+ mHwModulesAll.clear();
}
status_t AudioPolicyManager::initCheck()
@@ -3825,7 +3700,6 @@
void AudioPolicyManager::addOutput(audio_io_handle_t output, const sp<SwAudioOutputDescriptor>& outputDesc)
{
- outputDesc->setIoHandle(output);
mOutputs.add(output, outputDesc);
updateMono(output); // update mono status when adding to output list
selectOutputForMusicEffects();
@@ -3840,7 +3714,6 @@
void AudioPolicyManager::addInput(audio_io_handle_t input, const sp<AudioInputDescriptor>& inputDesc)
{
- inputDesc->setIoHandle(input);
mInputs.add(input, inputDesc);
nextAudioPortGeneration();
}
@@ -3887,19 +3760,15 @@
}
// then look for output profiles that can be routed to this device
SortedVector< sp<IOProfile> > profiles;
- for (size_t i = 0; i < mHwModules.size(); i++)
- {
- if (mHwModules[i]->mHandle == 0) {
- continue;
- }
- for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
- {
- sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
+ for (const auto& hwModule : mHwModules) {
+ for (size_t j = 0; j < hwModule->mOutputProfiles.size(); j++) {
+ sp<IOProfile> profile = hwModule->mOutputProfiles[j];
if (profile->supportDevice(device)) {
if (!device_distinguishes_on_address(device) ||
profile->supportDeviceAddress(address)) {
profiles.add(profile);
- ALOGV("checkOutputsForDevice(): adding profile %zu from module %zu", j, i);
+ ALOGV("checkOutputsForDevice(): adding profile %zu from module %s",
+ j, hwModule->getName());
}
}
}
@@ -3937,27 +3806,11 @@
ALOGV("opening output for device %08x with params %s profile %p name %s",
device, address.string(), profile.get(), profile->getName().string());
desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
- desc->mDevice = device;
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = desc->mSamplingRate;
- config.channel_mask = desc->mChannelMask;
- config.format = desc->mFormat;
- config.offload_info.sample_rate = desc->mSamplingRate;
- config.offload_info.channel_mask = desc->mChannelMask;
- config.offload_info.format = desc->mFormat;
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = mpClientInterface->openOutput(profile->getModuleHandle(),
- &output,
- &config,
- &desc->mDevice,
- address,
- &desc->mLatency,
- desc->mFlags);
- if (status == NO_ERROR) {
- desc->mSamplingRate = config.sample_rate;
- desc->mChannelMask = config.channel_mask;
- desc->mFormat = config.format;
+ status_t status = desc->open(nullptr, device, address,
+ AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
+ if (status == NO_ERROR) {
// Here is where the out_set_parameters() for card & device gets called
if (!address.isEmpty()) {
char *param = audio_device_address_to_parameter(device, address);
@@ -3967,27 +3820,21 @@
updateAudioProfiles(device, output, profile->getAudioProfiles());
if (!profile->hasValidAudioProfile()) {
ALOGW("checkOutputsForDevice() missing param");
- mpClientInterface->closeOutput(output);
+ desc->close();
output = AUDIO_IO_HANDLE_NONE;
} else if (profile->hasDynamicAudioProfile()) {
- mpClientInterface->closeOutput(output);
+ desc->close();
output = AUDIO_IO_HANDLE_NONE;
- profile->pickAudioProfile(config.sample_rate, config.channel_mask, config.format);
+ audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+ profile->pickAudioProfile(
+ config.sample_rate, config.channel_mask, config.format);
config.offload_info.sample_rate = config.sample_rate;
config.offload_info.channel_mask = config.channel_mask;
config.offload_info.format = config.format;
- status = mpClientInterface->openOutput(profile->getModuleHandle(),
- &output,
- &config,
- &desc->mDevice,
- address,
- &desc->mLatency,
- desc->mFlags);
- if (status == NO_ERROR) {
- desc->mSamplingRate = config.sample_rate;
- desc->mChannelMask = config.channel_mask;
- desc->mFormat = config.format;
- } else {
+
+ status_t status = desc->open(&config, device, address, AUDIO_STREAM_DEFAULT,
+ AUDIO_OUTPUT_FLAG_NONE, &output);
+ if (status != NO_ERROR) {
output = AUDIO_IO_HANDLE_NONE;
}
}
@@ -4033,7 +3880,7 @@
} else {
ALOGW("checkOutputsForDevice() could not open dup output for %d and %d",
mPrimaryOutput->mIoHandle, output);
- mpClientInterface->closeOutput(output);
+ desc->close();
removeOutput(output);
nextAudioPortGeneration();
output = AUDIO_IO_HANDLE_NONE;
@@ -4085,17 +3932,13 @@
}
}
// Clear any profiles associated with the disconnected device.
- for (size_t i = 0; i < mHwModules.size(); i++)
- {
- if (mHwModules[i]->mHandle == 0) {
- continue;
- }
- for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
- {
- sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
+ for (const auto& hwModule : mHwModules) {
+ for (size_t j = 0; j < hwModule->mOutputProfiles.size(); j++) {
+ sp<IOProfile> profile = hwModule->mOutputProfiles[j];
if (profile->supportDevice(device)) {
ALOGV("checkOutputsForDevice(): "
- "clearing direct output profile %zu on module %zu", j, i);
+ "clearing direct output profile %zu on module %s",
+ j, hwModule->getName());
profile->clearAudioProfiles();
}
}
@@ -4129,23 +3972,18 @@
// then look for input profiles that can be routed to this device
SortedVector< sp<IOProfile> > profiles;
- for (size_t module_idx = 0; module_idx < mHwModules.size(); module_idx++)
- {
- if (mHwModules[module_idx]->mHandle == 0) {
- continue;
- }
+ for (const auto& hwModule : mHwModules) {
for (size_t profile_index = 0;
- profile_index < mHwModules[module_idx]->mInputProfiles.size();
- profile_index++)
- {
- sp<IOProfile> profile = mHwModules[module_idx]->mInputProfiles[profile_index];
+ profile_index < hwModule->mInputProfiles.size();
+ profile_index++) {
+ sp<IOProfile> profile = hwModule->mInputProfiles[profile_index];
if (profile->supportDevice(device)) {
if (!device_distinguishes_on_address(device) ||
profile->supportDeviceAddress(address)) {
profiles.add(profile);
- ALOGV("checkInputsForDevice(): adding profile %zu from module %zu",
- profile_index, module_idx);
+ ALOGV("checkInputsForDevice(): adding profile %zu from module %s",
+ profile_index, hwModule->getName());
}
}
}
@@ -4176,31 +4014,16 @@
continue;
}
- ALOGV("opening input for device 0x%X with params %s", device, address.string());
- desc = new AudioInputDescriptor(profile);
- desc->mDevice = device;
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = desc->mSamplingRate;
- config.channel_mask = desc->mChannelMask;
- config.format = desc->mFormat;
+ desc = new AudioInputDescriptor(profile, mpClientInterface);
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
-
- ALOGV("opening inputput for device %08x with params %s profile %p name %s",
- desc->mDevice, address.string(), profile.get(), profile->getName().string());
-
- status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
- &input,
- &config,
- &desc->mDevice,
- address,
- AUDIO_SOURCE_MIC,
- AUDIO_INPUT_FLAG_NONE /*FIXME*/);
+ status_t status = desc->open(nullptr,
+ device,
+ address,
+ AUDIO_SOURCE_MIC,
+ AUDIO_INPUT_FLAG_NONE,
+ &input);
if (status == NO_ERROR) {
- desc->mSamplingRate = config.sample_rate;
- desc->mChannelMask = config.channel_mask;
- desc->mFormat = config.format;
-
if (!address.isEmpty()) {
char *param = audio_device_address_to_parameter(device, address);
mpClientInterface->setParameters(input, String8(param));
@@ -4209,7 +4032,7 @@
updateAudioProfiles(device, input, profile->getAudioProfiles());
if (!profile->hasValidAudioProfile()) {
ALOGW("checkInputsForDevice() direct input missing param");
- mpClientInterface->closeInput(input);
+ desc->close();
input = AUDIO_IO_HANDLE_NONE;
}
@@ -4247,17 +4070,14 @@
}
}
// Clear any profiles associated with the disconnected device.
- for (size_t module_index = 0; module_index < mHwModules.size(); module_index++) {
- if (mHwModules[module_index]->mHandle == 0) {
- continue;
- }
+ for (const auto& hwModule : mHwModules) {
for (size_t profile_index = 0;
- profile_index < mHwModules[module_index]->mInputProfiles.size();
+ profile_index < hwModule->mInputProfiles.size();
profile_index++) {
- sp<IOProfile> profile = mHwModules[module_index]->mInputProfiles[profile_index];
+ sp<IOProfile> profile = hwModule->mInputProfiles[profile_index];
if (profile->supportDevice(device)) {
- ALOGV("checkInputsForDevice(): clearing direct input profile %zu on module %zu",
- profile_index, module_index);
+ ALOGV("checkInputsForDevice(): clearing direct input profile %zu on module %s",
+ profile_index, hwModule->getName());
profile->clearAudioProfiles();
}
}
@@ -4317,11 +4137,8 @@
mpClientInterface->onAudioPatchListUpdate();
}
- AudioParameter param;
- param.add(String8("closing"), String8("true"));
- mpClientInterface->setParameters(output, param.toString());
+ outputDesc->close();
- mpClientInterface->closeOutput(output);
removeOutput(output);
mPreviousOutputs = mOutputs;
}
@@ -4346,7 +4163,7 @@
mpClientInterface->onAudioPatchListUpdate();
}
- mpClientInterface->closeInput(input);
+ inputDesc->close();
mInputs.removeItem(input);
}
@@ -4412,14 +4229,14 @@
ALOGV("checkOutputForStrategy() strategy %d, moving from output %d to output %d",
strategy, srcOutputs[0], dstOutputs[0]);
// mute strategy while moving tracks from one output to another
- for (size_t i = 0; i < srcOutputs.size(); i++) {
- sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(srcOutputs[i]);
+ for (audio_io_handle_t srcOut : srcOutputs) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(srcOut);
if (isStrategyActive(desc, strategy)) {
setStrategyMute(strategy, true, desc);
setStrategyMute(strategy, false, desc, MUTE_TIME_MS, newDevice);
}
sp<AudioSourceDescriptor> source =
- getSourceForStrategyOnOutput(srcOutputs[i], strategy);
+ getSourceForStrategyOnOutput(srcOut, strategy);
if (source != 0){
connectAudioSource(source);
}
@@ -4613,9 +4430,8 @@
routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
audio_devices_t curDevices =
getDeviceForStrategy((routing_strategy)curStrategy, false /*fromCache*/);
- SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(curDevices, mOutputs);
- for (size_t i = 0; i < outputs.size(); i++) {
- sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
+ for (audio_io_handle_t output : getOutputsForDevice(curDevices, mOutputs)) {
+ sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
if (outputDesc->isStreamActive((audio_stream_type_t)curStream)) {
curDevices |= outputDesc->device();
}
@@ -5099,14 +4915,8 @@
// TODO: perhaps isCompatibleProfile should return a "matching" score so we can return
// the best matching profile, not the first one.
- for (size_t i = 0; i < mHwModules.size(); i++)
- {
- if (mHwModules[i]->mHandle == 0) {
- continue;
- }
- for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++)
- {
- sp<IOProfile> profile = mHwModules[i]->mInputProfiles[j];
+ for (const auto& hwModule : mHwModules) {
+ for (const auto& profile : hwModule->mInputProfiles) {
// profile->log();
if (profile->isCompatibleProfile(device, address, samplingRate,
&samplingRate /*updatedSamplingRate*/,
@@ -5586,7 +5396,7 @@
bool supportsAC3 = false;
bool supportsOtherSurround = false;
bool supportsIEC61937 = false;
- for (size_t formatIndex = 0; formatIndex < formats.size(); formatIndex++) {
+ for (ssize_t formatIndex = 0; formatIndex < (ssize_t)formats.size(); formatIndex++) {
audio_format_t format = formats[formatIndex];
switch (format) {
case AUDIO_FORMAT_AC3:
@@ -5682,8 +5492,7 @@
} else if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
bool supports5dot1 = false;
// Are there any channel masks that can be considered "surround"?
- for (size_t maskIndex = 0; maskIndex < channelMasks.size(); maskIndex++) {
- audio_channel_mask_t channelMask = channelMasks[maskIndex];
+ for (audio_channel_mask_t channelMask : channelMasks) {
if ((channelMask & AUDIO_CHANNEL_OUT_5POINT1) == AUDIO_CHANNEL_OUT_5POINT1) {
supports5dot1 = true;
break;
@@ -5720,10 +5529,8 @@
}
profiles.setFormats(formats);
}
- const FormatVector &supportedFormats = profiles.getSupportedFormats();
- for (size_t formatIndex = 0; formatIndex < supportedFormats.size(); formatIndex++) {
- audio_format_t format = supportedFormats[formatIndex];
+ for (audio_format_t format : profiles.getSupportedFormats()) {
ChannelsVector channelMasks;
SampleRateVector samplingRates;
AudioParameter requestedParameters;
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 11894dc..b61bc2d 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -266,7 +266,7 @@
{
return mDefaultOutputDevice;
}
-protected:
+
void addOutput(audio_io_handle_t output, const sp<SwAudioOutputDescriptor>& outputDesc);
void removeOutput(audio_io_handle_t output);
void addInput(audio_io_handle_t input, const sp<AudioInputDescriptor>& inputDesc);
@@ -530,7 +530,9 @@
EffectDescriptorCollection mEffects; // list of registered audio effects
bool mA2dpSuspended; // true if A2DP output is suspended
sp<DeviceDescriptor> mDefaultOutputDevice; // output device selected by default at boot time
- HwModuleCollection mHwModules;
+ HwModuleCollection mHwModules; // contains only modules that have been loaded successfully
+ HwModuleCollection mHwModulesAll; // normally not needed, used during construction and for
+ // dumps
volatile int32_t mAudioPortGeneration;
@@ -601,20 +603,15 @@
audio_devices_t device,
audio_session_t session,
audio_stream_type_t stream,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo);
+ const audio_config_t *config,
+ audio_output_flags_t flags);
// internal method to return the input handle for the given device and format
audio_io_handle_t getInputForDevice(audio_devices_t device,
String8 address,
audio_session_t session,
uid_t uid,
audio_source_t inputSource,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_base_t *config,
audio_input_flags_t flags,
AudioMix *policyMix);
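Both internal helpers now receive a single config struct instead of the unpacked sample rate, format, channel mask and offload info. A caller fills in an audio_config_t (or audio_config_base_t for inputs) and passes a pointer through; a minimal sketch with illustrative values (the real values come from the client request in getOutputForAttr()):

    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
    config.sample_rate = 48000;                      // illustrative
    config.format = AUDIO_FORMAT_PCM_16_BIT;         // illustrative
    config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;  // illustrative
    audio_io_handle_t output = getOutputForDevice(device, session, stream, &config, flags);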
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index bd94e3e..1ee5ccf 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -278,8 +278,8 @@
return NO_INIT;
}
// already checked by client, but double-check in case the client wrapper is bypassed
- if (attr->source >= AUDIO_SOURCE_CNT && attr->source != AUDIO_SOURCE_HOTWORD &&
- attr->source != AUDIO_SOURCE_FM_TUNER) {
+ if (attr->source < AUDIO_SOURCE_DEFAULT && attr->source >= AUDIO_SOURCE_CNT &&
+ attr->source != AUDIO_SOURCE_HOTWORD && attr->source != AUDIO_SOURCE_FM_TUNER) {
return BAD_VALUE;
}
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 7ec3ccb..1fbba58 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -91,9 +91,6 @@
LOCAL_CFLAGS += -Wall -Wextra -Werror
-# Workaround for invalid unused-lambda-capture warning http://b/38349491
-LOCAL_CLANG_CFLAGS += -Wno-error=unused-lambda-capture
-
LOCAL_MODULE:= libcameraservice
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 2cf648f..585d2eb 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -859,6 +859,12 @@
outputStreams.push(getPreviewStreamId());
+ if (params.isDeviceZslSupported) {
+ // If device ZSL is supported, resume preview buffers that may be paused
+ // during last takePicture().
+ mDevice->dropStreamBuffers(false, getPreviewStreamId());
+ }
+
if (!params.recordingHint) {
if (!restart) {
res = mStreamingProcessor->updatePreviewRequest(params);
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index a407d0b..910dd78 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -136,7 +136,12 @@
const char *enddump = "\n\n";
write(fd, enddump, strlen(enddump));
- return mHardware->dump(fd, args);
+ sp<CameraHardwareInterface> hardware = mHardware;
+ if (hardware != nullptr) {
+ return hardware->dump(fd, args);
+ }
+ ALOGI("%s: camera device closed already, skip dumping", __FUNCTION__);
+ return OK;
}
// ----------------------------------------------------------------------------
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index b65f1c7..1ee216f 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -553,6 +553,12 @@
return DONE;
}
+ if (l.mParameters.isDeviceZslSupported) {
+ // If device ZSL is supported, drop all pending preview buffers to reduce the chance of
+ // rendering preview frames newer than the still frame.
+ client->getCameraDevice()->dropStreamBuffers(true, client->getPreviewStreamId());
+ }
+
/**
* Clear the streaming request for still-capture pictures
* (as opposed to i.e. video snapshots)
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 68384b0..f1f96c3 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -359,6 +359,12 @@
const std::vector<android::camera3::OutputStreamInfo> &outputInfo,
const std::vector<size_t> &removedSurfaceIds,
KeyedVector<sp<Surface>, size_t> *outputMap/*out*/) = 0;
+
+ /**
+ * Start (dropping == true) or stop (dropping == false) dropping buffers for the
+ * stream identified by streamId.
+ */
+ virtual status_t dropStreamBuffers(bool /*dropping*/, int /*streamId*/) = 0;
};
}; // namespace android
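The call sites added earlier in this change use the new entry point in pairs: dropping is switched on when a still capture starts and switched off when preview is restarted. Condensed from the CaptureSequencer and Camera2Client hunks above (illustrative, error handling omitted):

    // CaptureSequencer: avoid rendering preview frames newer than the still frame
    client->getCameraDevice()->dropStreamBuffers(true, client->getPreviewStreamId());

    // Camera2Client, when preview is (re)started after takePicture()
    mDevice->dropStreamBuffers(false, getPreviewStreamId());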
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index e0a2dd4..c0db8e7 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2039,6 +2039,20 @@
return res;
}
+status_t Camera3Device::dropStreamBuffers(bool dropping, int streamId) {
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ int idx = mOutputStreams.indexOfKey(streamId);
+ if (idx == NAME_NOT_FOUND) {
+ ALOGE("%s: Stream %d is not found.", __FUNCTION__, streamId);
+ return BAD_VALUE;
+ }
+
+ sp<Camera3OutputStreamInterface> stream = mOutputStreams.editValueAt(idx);
+ return stream->dropBuffers(dropping);
+}
+
/**
* Camera3Device private methods
*/
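Camera3Device::dropStreamBuffers() above only resolves the stream id under the interface and device locks and forwards the flag to the output stream, returning BAD_VALUE when the id is unknown. A hypothetical caller-side sketch of handling that error; the tolerant policy shown is an assumption, not something the patch mandates:

// Illustrative only: treat an already-removed stream as "nothing to pause".
void pausePreviewBuffersIfPresent(const sp<CameraDeviceBase>& device, int previewStreamId) {
    status_t res = device->dropStreamBuffers(/*dropping*/ true, previewStreamId);
    if (res == BAD_VALUE) {
        ALOGW("Preview stream %d not found; nothing to pause", previewStreamId);
    }
}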
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 357b893..e9466ab 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -192,6 +192,12 @@
const std::vector<size_t> &removedSurfaceIds,
KeyedVector<sp<Surface>, size_t> *outputMap/*out*/);
+ /**
+ * If dropping is true, drop buffers for the stream identified by streamId; if dropping is
+ * false, stop dropping buffers for that stream.
+ */
+ status_t dropStreamBuffers(bool dropping, int streamId) override;
+
private:
// internal typedefs
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index 4b36ea2..0a245c4 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -108,6 +108,10 @@
return false;
}
+status_t Camera3DummyStream::dropBuffers(bool /*dropping*/) {
+ return OK;
+}
+
status_t Camera3DummyStream::setConsumers(const std::vector<sp<Surface>>& /*consumers*/) {
ALOGE("%s: Stream %d: Dummy stream doesn't support set consumer surface!",
__FUNCTION__, mId);
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 3212031..684f4b0 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -57,6 +57,12 @@
virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
/**
+ * Drop buffers if dropping is true. If dropping is false, do not drop buffers.
+ */
+ virtual status_t dropBuffers(bool /*dropping*/) override;
+
+ /**
* Return if this output stream is for video encoding.
*/
bool isVideoStream() const;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 329172a..e79eecc 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -44,6 +44,7 @@
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(0),
+ mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
if (mConsumer == NULL) {
@@ -70,6 +71,7 @@
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(0),
+ mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
if (format != HAL_PIXEL_FORMAT_BLOB && format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
@@ -100,6 +102,7 @@
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(consumerUsage),
+ mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
// Deferred consumer only support preview surface format now.
if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
@@ -139,6 +142,7 @@
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(consumerUsage),
+ mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
@@ -227,9 +231,14 @@
/**
* Return buffer back to ANativeWindow
*/
- if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
+ if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR || mDropBuffers) {
// Cancel buffer
- ALOGW("A frame is dropped for stream %d", mId);
+ if (mDropBuffers) {
+ ALOGV("%s: Dropping a frame for stream %d.", __FUNCTION__, mId);
+ } else {
+ ALOGW("%s: A frame is dropped for stream %d due to buffer error.", __FUNCTION__, mId);
+ }
+
res = currentConsumer->cancelBuffer(currentConsumer.get(),
anwBuffer,
anwReleaseFence);
@@ -785,6 +794,12 @@
return res;
}
+status_t Camera3OutputStream::dropBuffers(bool dropping) {
+ Mutex::Autolock l(mLock);
+ mDropBuffers = dropping;
+ return OK;
+}
+
status_t Camera3OutputStream::notifyBufferReleased(ANativeWindowBuffer* /*anwBuffer*/) {
return OK;
}
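Inside Camera3OutputStream, the new mDropBuffers flag reuses the existing error path: a returned buffer is cancelled rather than queued whenever the HAL marked it bad or dropping is enabled. A simplified sketch of that decision, assuming an ANativeWindow consumer and omitting the fence, timestamp, and buffer-manager bookkeeping the real method performs:

// Simplified return-path sketch; bufferError corresponds to CAMERA3_BUFFER_STATUS_ERROR
// and dropBuffers to the mDropBuffers flag set via dropBuffers(bool).
int returnFilledBuffer(ANativeWindow* consumer, ANativeWindowBuffer* anwBuffer,
                       int releaseFenceFd, bool bufferError, bool dropBuffers) {
    if (bufferError || dropBuffers) {
        // Give the buffer back to the queue without displaying or encoding it.
        return consumer->cancelBuffer(consumer, anwBuffer, releaseFenceFd);
    }
    // Normal path: hand the filled buffer to the consumer.
    return consumer->queueBuffer(consumer, anwBuffer, releaseFenceFd);
}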
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index fbb14fe..18b1901 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -166,6 +166,11 @@
virtual status_t notifyBufferReleased(ANativeWindowBuffer *anwBuffer);
/**
+ * Drop buffers if dropping is true. If dropping is false, do not drop buffers.
+ */
+ virtual status_t dropBuffers(bool dropping) override;
+
+ /**
* Set the graphic buffer manager to get/return the stream buffers.
*
* It is only legal to call this method when stream is in STATE_CONSTRUCTED state.
@@ -260,6 +265,9 @@
*/
uint64_t mConsumerUsage;
+ // Whether to drop valid buffers.
+ bool mDropBuffers;
+
/**
* Internal Camera3Stream interface
*/
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index edfbab1..08fcf38 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -73,6 +73,11 @@
const std::vector<OutputStreamInfo> &outputInfo,
const std::vector<size_t> &removedSurfaceIds,
KeyedVector<sp<Surface>, size_t> *outputMap/*out*/) = 0;
+
+ /**
+ * Drop buffers if dropping is true. If dropping is false, do not drop buffers.
+ */
+ virtual status_t dropBuffers(bool /*dropping*/) = 0;
};
} // namespace camera3
diff --git a/services/mediaanalytics/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
index 8444444..7f42b1b 100644
--- a/services/mediaanalytics/MediaAnalyticsService.cpp
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -272,7 +272,7 @@
}
ALOGV("given uid %d; sanitized uid: %d sanitized pkg: %s "
- "sanitized pkg version: %d",
+ "sanitized pkg version: %" PRId64,
uid_given, item->getUid(),
item->getPkgName().c_str(),
item->getPkgVersionCode());
@@ -856,7 +856,7 @@
} else {
AString pkg;
std::string installer = "";
- int32_t versionCode = 0;
+ int64_t versionCode = 0;
struct passwd *pw = getpwuid(uid);
if (pw) {
@@ -926,7 +926,7 @@
}
- ALOGV("package '%s' installed by '%s' versioncode %d / %08x",
+ ALOGV("package '%s' installed by '%s' versioncode %" PRId64 " / %" PRIx64,
pkg.c_str(), installer.c_str(), versionCode, versionCode);
if (strncmp(installer.c_str(), "com.android.", 12) == 0) {
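Widening versionCode to int64_t requires matching format specifiers: passing a 64-bit value to "%d" is undefined behaviour, most visibly on 32-bit builds, which is why the log statements switch to the PRId64/PRIx64 macros. A small standalone example of the same idiom (values are illustrative):

#include <cinttypes>   // PRId64 / PRIx64; also provides the fixed-width integer types
#include <cstdio>

int main() {
    int64_t versionCode = 300900000LL;   // illustrative package version code
    // String-literal concatenation splices the macro into the format string, so the
    // specifier matches the 64-bit argument on both 32- and 64-bit builds.
    std::printf("versioncode %" PRId64 " / %" PRIx64 "\n", versionCode, versionCode);
    return 0;
}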
diff --git a/services/mediaanalytics/MediaAnalyticsService.h b/services/mediaanalytics/MediaAnalyticsService.h
index 3b34f44..fce7d08 100644
--- a/services/mediaanalytics/MediaAnalyticsService.h
+++ b/services/mediaanalytics/MediaAnalyticsService.h
@@ -138,7 +138,7 @@
uid_t uid;
AString pkg;
AString installer;
- int32_t versionCode;
+ int64_t versionCode;
nsecs_t expiration;
};
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 51ae665..ac3202b 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -142,7 +142,31 @@
}
}
+// Drop one service reference; if a close is pending and no other call is still in flight, close the stream.
+bool AAudioService::releaseStream(const sp<AAudioServiceStreamBase> &serviceStream) {
+ bool closed = false;
+ if ((serviceStream->decrementServiceReferenceCount() == 0) && serviceStream->isCloseNeeded()) {
+ // removeStreamByHandle() uses a lock so that if there are two simultaneous closes
+ // then only one will get the pointer and do the close.
+ sp<AAudioServiceStreamBase> foundStream = mStreamTracker.removeStreamByHandle(serviceStream->getHandle());
+ if (foundStream.get() != nullptr) {
+ foundStream->close();
+ pid_t pid = foundStream->getOwnerProcessId();
+ AAudioClientTracker::getInstance().unregisterClientStream(pid, foundStream);
+ }
+ closed = true;
+ }
+ return closed;
+}
+
+aaudio_result_t AAudioService::checkForPendingClose(
+ const sp<AAudioServiceStreamBase> &serviceStream,
+ aaudio_result_t defaultResult) {
+ return releaseStream(serviceStream) ? AAUDIO_ERROR_INVALID_STATE : defaultResult;
+}
+
aaudio_result_t AAudioService::closeStream(aaudio_handle_t streamHandle) {
+ ALOGD("closeStream(0x%08X)", streamHandle);
// Check permission and ownership first.
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
@@ -150,22 +174,13 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
- ALOGD("closeStream(0x%08X)", streamHandle);
- // Remove handle from tracker so that we cannot look up the raw address any more.
- // removeStreamByHandle() uses a lock so that if there are two simultaneous closes
- // then only one will get the pointer and do the close.
- serviceStream = mStreamTracker.removeStreamByHandle(streamHandle);
- if (serviceStream.get() != nullptr) {
- serviceStream->close();
- pid_t pid = serviceStream->getOwnerProcessId();
- AAudioClientTracker::getInstance().unregisterClientStream(pid, serviceStream);
- return AAUDIO_OK;
- } else {
- ALOGW("closeStream(0x%0x) being handled by another thread", streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
-}
+ pid_t pid = serviceStream->getOwnerProcessId();
+ AAudioClientTracker::getInstance().unregisterClientStream(pid, serviceStream);
+ serviceStream->setCloseNeeded(true);
+ (void) releaseStream(serviceStream);
+ return AAUDIO_OK;
+}
sp<AAudioServiceStreamBase> AAudioService::convertHandleToServiceStream(
aaudio_handle_t streamHandle) {
@@ -181,7 +196,9 @@
if (!allowed) {
ALOGE("AAudioService: calling uid %d cannot access stream 0x%08X owned by %d",
callingUserId, streamHandle, ownerUserId);
- serviceStream = nullptr;
+ serviceStream.clear();
+ } else {
+ serviceStream->incrementServiceReferenceCount();
}
}
return serviceStream;
@@ -198,7 +215,7 @@
aaudio_result_t result = serviceStream->getDescription(parcelable);
// parcelable.dump();
- return result;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::startStream(aaudio_handle_t streamHandle) {
@@ -208,7 +225,8 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->start();
+ aaudio_result_t result = serviceStream->start();
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::pauseStream(aaudio_handle_t streamHandle) {
@@ -218,7 +236,7 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
aaudio_result_t result = serviceStream->pause();
- return result;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::stopStream(aaudio_handle_t streamHandle) {
@@ -228,7 +246,7 @@
return AAUDIO_ERROR_INVALID_HANDLE;
}
aaudio_result_t result = serviceStream->stop();
- return result;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::flushStream(aaudio_handle_t streamHandle) {
@@ -237,48 +255,51 @@
ALOGE("flushStream(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->flush();
+ aaudio_result_t result = serviceStream->flush();
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::registerAudioThread(aaudio_handle_t streamHandle,
pid_t clientThreadId,
int64_t periodNanoseconds) {
+ aaudio_result_t result = AAUDIO_OK;
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGE("registerAudioThread(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
if (serviceStream->getRegisteredThread() != AAudioServiceStreamBase::ILLEGAL_THREAD_ID) {
- ALOGE("registerAudioThread(), thread already registered");
- return AAUDIO_ERROR_INVALID_STATE;
- }
-
- const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
- serviceStream->setRegisteredThread(clientThreadId);
- int err = android::requestPriority(ownerPid, clientThreadId,
- DEFAULT_AUDIO_PRIORITY, true /* isForApp */);
- if (err != 0){
- ALOGE("registerAudioThread(%d) failed, errno = %d, priority = %d",
- clientThreadId, errno, DEFAULT_AUDIO_PRIORITY);
- return AAUDIO_ERROR_INTERNAL;
+ ALOGE("AAudioService::registerAudioThread(), thread already registered");
+ result = AAUDIO_ERROR_INVALID_STATE;
} else {
- return AAUDIO_OK;
+ const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
+ serviceStream->setRegisteredThread(clientThreadId);
+ int err = android::requestPriority(ownerPid, clientThreadId,
+ DEFAULT_AUDIO_PRIORITY, true /* isForApp */);
+ if (err != 0) {
+ ALOGE("AAudioService::registerAudioThread(%d) failed, errno = %d, priority = %d",
+ clientThreadId, errno, DEFAULT_AUDIO_PRIORITY);
+ result = AAUDIO_ERROR_INTERNAL;
+ }
}
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::unregisterAudioThread(aaudio_handle_t streamHandle,
pid_t clientThreadId) {
+ aaudio_result_t result = AAUDIO_OK;
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGE("unregisterAudioThread(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
if (serviceStream->getRegisteredThread() != clientThreadId) {
- ALOGE("unregisterAudioThread(), wrong thread");
- return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ ALOGE("AAudioService::unregisterAudioThread(), wrong thread");
+ result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ } else {
+ serviceStream->setRegisteredThread(0);
}
- serviceStream->setRegisteredThread(0);
- return AAUDIO_OK;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::startClient(aaudio_handle_t streamHandle,
@@ -289,7 +310,8 @@
ALOGE("startClient(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->startClient(client, clientHandle);
+ aaudio_result_t result = serviceStream->startClient(client, clientHandle);
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::stopClient(aaudio_handle_t streamHandle,
@@ -299,5 +321,6 @@
ALOGE("stopClient(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->stopClient(clientHandle);
+ aaudio_result_t result = serviceStream->stopClient(clientHandle);
+ return checkForPendingClose(serviceStream, result);
}
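The AAudioService changes replace the immediate close in closeStream() with a deferred close driven by a per-stream service reference count: convertHandleToServiceStream() increments the count when a call looks the stream up, and checkForPendingClose() decrements it on the way out, performing the close only once a close has been requested and no call is still using the object. A hypothetical entry point showing the pattern every service call above follows (exampleCall() and doSomething() are illustrative, not part of the patch):

aaudio_result_t AAudioService::exampleCall(aaudio_handle_t streamHandle) {
    // A successful lookup also takes a service reference on the stream.
    sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
    if (serviceStream.get() == nullptr) {
        return AAUDIO_ERROR_INVALID_HANDLE;   // never referenced, nothing to release
    }
    aaudio_result_t result = serviceStream->doSomething();
    // Release this call's reference; runs the deferred close if one is pending.
    return checkForPendingClose(serviceStream, result);
}

closeStream() itself now only unregisters the stream and marks it with setCloseNeeded(true); the actual teardown happens when the last in-flight call releases its reference, which avoids closing the stream underneath a concurrent binder call.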
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index eef0824..bdd9e0b 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -94,9 +94,15 @@
aaudio::aaudio_handle_t streamHandle);
- android::AudioClient mAudioClient;
- aaudio::AAudioStreamTracker mStreamTracker;
+ bool releaseStream(const sp<aaudio::AAudioServiceStreamBase> &serviceStream);
+
+ aaudio_result_t checkForPendingClose(const sp<aaudio::AAudioServiceStreamBase> &serviceStream,
+ aaudio_result_t defaultResult);
+
+ android::AudioClient mAudioClient;
+
+ aaudio::AAudioStreamTracker mStreamTracker;
enum constants {
DEFAULT_AUDIO_PRIORITY = 2
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 635b45c..53d2860 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -402,3 +402,13 @@
void AAudioServiceStreamBase::onVolumeChanged(float volume) {
sendServiceEvent(AAUDIO_SERVICE_EVENT_VOLUME, volume);
}
+
+int32_t AAudioServiceStreamBase::incrementServiceReferenceCount() {
+ std::lock_guard<std::mutex> lock(mCallingCountLock);
+ return ++mCallingCount;
+}
+
+int32_t AAudioServiceStreamBase::decrementServiceReferenceCount() {
+ std::lock_guard<std::mutex> lock(mCallingCountLock);
+ return --mCallingCount;
+}
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 29987f6..5f5bb98 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -199,6 +199,26 @@
return mFlowing;
}
+ /**
+ * Atomically increment the number of active references to the stream by AAudioService.
+ * @return value after the increment
+ */
+ int32_t incrementServiceReferenceCount();
+
+ /**
+ * Atomically decrement the number of active references to the stream by AAudioService.
+ * @return value after the decrement
+ */
+ int32_t decrementServiceReferenceCount();
+
+ bool isCloseNeeded() const {
+ return mCloseNeeded.load();
+ }
+
+ void setCloseNeeded(bool needed) {
+ mCloseNeeded.store(needed);
+ }
+
protected:
/**
@@ -256,8 +276,11 @@
private:
aaudio_handle_t mHandle = -1;
-
bool mFlowing = false;
+
+ std::mutex mCallingCountLock;
+ std::atomic<int32_t> mCallingCount{0};
+ std::atomic<bool> mCloseNeeded{false};
};
} /* namespace aaudio */